diff --git a/Cargo.lock b/Cargo.lock index 7108be7..06d6458 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -273,6 +273,7 @@ dependencies = [ "matchit", "memchr", "mime", + "multer", "percent-encoding", "pin-project-lite", "serde_core", @@ -4057,6 +4058,23 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "memchr", + "mime", + "spin", + "version_check", +] + [[package]] name = "mutate_once" version = "0.1.2" @@ -4899,6 +4917,7 @@ dependencies = [ "argon2", "axum", "axum-server", + "blake3", "chrono", "clap", "governor", @@ -6401,6 +6420,12 @@ dependencies = [ "smallvec", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spinning_top" version = "0.3.0" diff --git a/Cargo.toml b/Cargo.toml index 6d40221..b33a61d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ notify = { version = "8.2.0", features = ["macos_fsevent"] } winnow = "0.7.14" # HTTP server -axum = { version = "0.8.8", features = ["macros"] } +axum = { version = "0.8.8", features = ["macros", "multipart"] } tower = "0.5.3" tower-http = { version = "0.6.8", features = ["cors", "trace", "set-header"] } governor = "0.8.1" diff --git a/crates/pinakes-core/src/config.rs b/crates/pinakes-core/src/config.rs index 3719631..d6af052 100644 --- a/crates/pinakes-core/src/config.rs +++ b/crates/pinakes-core/src/config.rs @@ -104,6 +104,12 @@ pub struct Config { pub analytics: AnalyticsConfig, #[serde(default)] pub photos: PhotoConfig, + #[serde(default)] + pub managed_storage: ManagedStorageConfig, + #[serde(default)] + pub sync: SyncConfig, + #[serde(default)] + pub sharing: 
SharingConfig,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -560,6 +566,180 @@ impl Default for PhotoConfig {
     }
 }
 
+// ===== Managed Storage Configuration =====
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ManagedStorageConfig {
+    /// Enable managed storage for file uploads
+    #[serde(default)]
+    pub enabled: bool,
+    /// Directory where managed files are stored
+    #[serde(default = "default_managed_storage_dir")]
+    pub storage_dir: PathBuf,
+    /// Maximum upload size in bytes (default: 10GB)
+    #[serde(default = "default_max_upload_size")]
+    pub max_upload_size: u64,
+    /// Allowed MIME types for uploads (empty = allow all)
+    #[serde(default)]
+    pub allowed_mime_types: Vec<String>,
+    /// Automatically clean up orphaned blobs
+    #[serde(default = "default_true")]
+    pub auto_cleanup: bool,
+    /// Verify file integrity on read
+    #[serde(default)]
+    pub verify_on_read: bool,
+}
+
+fn default_managed_storage_dir() -> PathBuf {
+    Config::default_data_dir().join("managed")
+}
+
+fn default_max_upload_size() -> u64 {
+    10 * 1024 * 1024 * 1024 // 10GB
+}
+
+impl Default for ManagedStorageConfig {
+    fn default() -> Self {
+        Self {
+            enabled: false,
+            storage_dir: default_managed_storage_dir(),
+            max_upload_size: default_max_upload_size(),
+            allowed_mime_types: vec![],
+            auto_cleanup: true,
+            verify_on_read: false,
+        }
+    }
+}
+
+// ===== Sync Configuration =====
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum ConflictResolution {
+    ServerWins,
+    ClientWins,
+    KeepBoth,
+    Manual,
+}
+
+impl Default for ConflictResolution {
+    fn default() -> Self {
+        Self::KeepBoth
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SyncConfig {
+    /// Enable cross-device sync functionality
+    #[serde(default)]
+    pub enabled: bool,
+    /// Default conflict resolution strategy
+    #[serde(default)]
+    pub default_conflict_resolution: ConflictResolution,
+    /// Maximum file size for sync in MB
+
#[serde(default = "default_max_sync_file_size")] + pub max_file_size_mb: u64, + /// Chunk size for chunked uploads in KB + #[serde(default = "default_chunk_size")] + pub chunk_size_kb: u64, + /// Upload session timeout in hours + #[serde(default = "default_upload_timeout")] + pub upload_timeout_hours: u64, + /// Maximum concurrent uploads per device + #[serde(default = "default_max_concurrent_uploads")] + pub max_concurrent_uploads: usize, + /// Sync log retention in days + #[serde(default = "default_sync_log_retention")] + pub sync_log_retention_days: u64, +} + +fn default_max_sync_file_size() -> u64 { + 4096 // 4GB +} + +fn default_chunk_size() -> u64 { + 4096 // 4MB +} + +fn default_upload_timeout() -> u64 { + 24 // 24 hours +} + +fn default_max_concurrent_uploads() -> usize { + 3 +} + +fn default_sync_log_retention() -> u64 { + 90 // 90 days +} + +impl Default for SyncConfig { + fn default() -> Self { + Self { + enabled: false, + default_conflict_resolution: ConflictResolution::default(), + max_file_size_mb: default_max_sync_file_size(), + chunk_size_kb: default_chunk_size(), + upload_timeout_hours: default_upload_timeout(), + max_concurrent_uploads: default_max_concurrent_uploads(), + sync_log_retention_days: default_sync_log_retention(), + } + } +} + +// ===== Sharing Configuration ===== + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SharingConfig { + /// Enable sharing functionality + #[serde(default = "default_true")] + pub enabled: bool, + /// Allow creating public share links + #[serde(default = "default_true")] + pub allow_public_links: bool, + /// Require password for public share links + #[serde(default)] + pub require_public_link_password: bool, + /// Maximum expiry time for public links in hours (0 = unlimited) + #[serde(default)] + pub max_public_link_expiry_hours: u64, + /// Allow users to reshare content shared with them + #[serde(default = "default_true")] + pub allow_reshare: bool, + /// Enable share notifications + 
#[serde(default = "default_true")] + pub notifications_enabled: bool, + /// Notification retention in days + #[serde(default = "default_notification_retention")] + pub notification_retention_days: u64, + /// Share activity log retention in days + #[serde(default = "default_activity_retention")] + pub activity_retention_days: u64, +} + +fn default_notification_retention() -> u64 { + 30 +} + +fn default_activity_retention() -> u64 { + 90 +} + +impl Default for SharingConfig { + fn default() -> Self { + Self { + enabled: true, + allow_public_links: true, + require_public_link_password: false, + max_public_link_expiry_hours: 0, + allow_reshare: true, + notifications_enabled: true, + notification_retention_days: default_notification_retention(), + activity_retention_days: default_activity_retention(), + } + } +} + // ===== Storage Configuration ===== #[derive(Debug, Clone, Serialize, Deserialize)] @@ -929,6 +1109,9 @@ impl Default for Config { cloud: CloudConfig::default(), analytics: AnalyticsConfig::default(), photos: PhotoConfig::default(), + managed_storage: ManagedStorageConfig::default(), + sync: SyncConfig::default(), + sharing: SharingConfig::default(), } } } diff --git a/crates/pinakes-core/src/error.rs b/crates/pinakes-core/src/error.rs index 96b8199..7343679 100644 --- a/crates/pinakes-core/src/error.rs +++ b/crates/pinakes-core/src/error.rs @@ -57,6 +57,54 @@ pub enum PinakesError { #[error("external API error: {0}")] External(String), + + // Managed Storage errors + #[error("managed storage not enabled")] + ManagedStorageDisabled, + + #[error("upload too large: {0} bytes exceeds limit")] + UploadTooLarge(u64), + + #[error("blob not found: {0}")] + BlobNotFound(String), + + #[error("storage integrity error: {0}")] + StorageIntegrity(String), + + // Sync errors + #[error("sync not enabled")] + SyncDisabled, + + #[error("device not found: {0}")] + DeviceNotFound(String), + + #[error("sync conflict: {0}")] + SyncConflict(String), + + #[error("upload session 
expired: {0}")] + UploadSessionExpired(String), + + #[error("upload session not found: {0}")] + UploadSessionNotFound(String), + + #[error("chunk out of order: expected {expected}, got {actual}")] + ChunkOutOfOrder { expected: u64, actual: u64 }, + + // Sharing errors + #[error("share not found: {0}")] + ShareNotFound(String), + + #[error("share expired: {0}")] + ShareExpired(String), + + #[error("share password required")] + SharePasswordRequired, + + #[error("share password invalid")] + SharePasswordInvalid, + + #[error("insufficient share permissions")] + InsufficientSharePermissions, } impl From for PinakesError { diff --git a/crates/pinakes-core/src/import.rs b/crates/pinakes-core/src/import.rs index 7b49af1..0683ab9 100644 --- a/crates/pinakes-core/src/import.rs +++ b/crates/pinakes-core/src/import.rs @@ -195,6 +195,12 @@ pub async fn import_file_with_options( rating: extracted.rating, perceptual_hash, + // Managed storage fields - external files use defaults + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, + created_at: now, updated_at: now, }; diff --git a/crates/pinakes-core/src/lib.rs b/crates/pinakes-core/src/lib.rs index 1db2c28..59129b5 100644 --- a/crates/pinakes-core/src/lib.rs +++ b/crates/pinakes-core/src/lib.rs @@ -12,6 +12,7 @@ pub mod hash; pub mod import; pub mod integrity; pub mod jobs; +pub mod managed_storage; pub mod media_type; pub mod metadata; pub mod model; @@ -22,10 +23,13 @@ pub mod plugin; pub mod scan; pub mod scheduler; pub mod search; +pub mod sharing; pub mod social; pub mod storage; pub mod subtitles; +pub mod sync; pub mod tags; pub mod thumbnail; pub mod transcode; +pub mod upload; pub mod users; diff --git a/crates/pinakes-core/src/managed_storage.rs b/crates/pinakes-core/src/managed_storage.rs new file mode 100644 index 0000000..210c5e0 --- /dev/null +++ b/crates/pinakes-core/src/managed_storage.rs @@ -0,0 +1,396 @@ +//! Content-addressable managed storage service. 
+//!
+//! Provides server-side file storage with:
+//! - BLAKE3 content hashing for deduplication
+//! - Hierarchical storage layout: `{root}/{hash[0..2]}/{hash[2..4]}/{hash}`
+//! - Integrity verification on read (optional)
+
+use std::path::{Path, PathBuf};
+
+use tokio::fs;
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt, BufReader};
+use tracing::{debug, info, warn};
+
+use crate::error::{PinakesError, Result};
+use crate::model::ContentHash;
+
+/// Content-addressable storage service for managed files.
+#[derive(Debug, Clone)]
+pub struct ManagedStorageService {
+    root_dir: PathBuf,
+    max_upload_size: u64,
+    verify_on_read: bool,
+}
+
+impl ManagedStorageService {
+    /// Create a new managed storage service.
+    pub fn new(root_dir: PathBuf, max_upload_size: u64, verify_on_read: bool) -> Self {
+        Self {
+            root_dir,
+            max_upload_size,
+            verify_on_read,
+        }
+    }
+
+    /// Initialize the storage directory structure.
+    pub async fn init(&self) -> Result<()> {
+        fs::create_dir_all(&self.root_dir).await?;
+        info!(path = %self.root_dir.display(), "initialized managed storage");
+        Ok(())
+    }
+
+    /// Get the storage path for a content hash.
+    ///
+    /// Layout: `{root}/{hash[0..2]}/{hash[2..4]}/{hash}`
+    pub fn path(&self, hash: &ContentHash) -> PathBuf {
+        let h = &hash.0;
+        if h.len() >= 4 {
+            self.root_dir.join(&h[0..2]).join(&h[2..4]).join(h)
+        } else {
+            // Fallback for short hashes (shouldn't happen with BLAKE3)
+            self.root_dir.join(h)
+        }
+    }
+
+    /// Check if a blob exists in storage.
+    pub async fn exists(&self, hash: &ContentHash) -> bool {
+        self.path(hash).exists()
+    }
+
+    /// Store a file from an async reader, computing the hash as we go.
+    ///
+    /// Returns the content hash and file size.
+    /// If the file already exists with the same hash, returns early (deduplication).
+    pub async fn store_stream<R: AsyncRead + Unpin>(
+        &self,
+        mut reader: R,
+    ) -> Result<(ContentHash, u64)> {
+        // First, stream to a temp file while computing the hash
+        let temp_dir = self.root_dir.join("temp");
+        fs::create_dir_all(&temp_dir).await?;
+
+        let temp_id = uuid::Uuid::now_v7();
+        let temp_path = temp_dir.join(temp_id.to_string());
+
+        let mut hasher = blake3::Hasher::new();
+        let mut temp_file = fs::File::create(&temp_path).await?;
+        let mut total_size = 0u64;
+
+        let mut buf = vec![0u8; 64 * 1024]; // 64KB buffer
+        loop {
+            let n = reader.read(&mut buf).await?;
+            if n == 0 {
+                break;
+            }
+
+            total_size += n as u64;
+            if total_size > self.max_upload_size {
+                // Clean up temp file
+                drop(temp_file);
+                let _ = fs::remove_file(&temp_path).await;
+                return Err(PinakesError::UploadTooLarge(total_size));
+            }
+
+            hasher.update(&buf[..n]);
+            temp_file.write_all(&buf[..n]).await?;
+        }
+
+        temp_file.flush().await?;
+        temp_file.sync_all().await?;
+        drop(temp_file);
+
+        let hash = ContentHash::new(hasher.finalize().to_hex().to_string());
+        let final_path = self.path(&hash);
+
+        // Check if file already exists (deduplication)
+        if final_path.exists() {
+            // Verify size matches
+            let existing_meta = fs::metadata(&final_path).await?;
+            if existing_meta.len() == total_size {
+                debug!(hash = %hash, "blob already exists, deduplicating");
+                let _ = fs::remove_file(&temp_path).await;
+                return Ok((hash, total_size));
+            } else {
+                warn!(
+                    hash = %hash,
+                    expected = total_size,
+                    actual = existing_meta.len(),
+                    "size mismatch for existing blob, replacing"
+                );
+            }
+        }
+
+        // Move temp file to final location
+        if let Some(parent) = final_path.parent() {
+            fs::create_dir_all(parent).await?;
+        }
+        fs::rename(&temp_path, &final_path).await?;
+
+        info!(hash = %hash, size = total_size, "stored new blob");
+        Ok((hash, total_size))
+    }
+
+    /// Store a file from a path.
+    pub async fn store_file(&self, path: &Path) -> Result<(ContentHash, u64)> {
+        let file = fs::File::open(path).await?;
+        let reader = BufReader::new(file);
+        self.store_stream(reader).await
+    }
+
+    /// Store bytes directly.
+    pub async fn store_bytes(&self, data: &[u8]) -> Result<(ContentHash, u64)> {
+        use std::io::Cursor;
+        let cursor = Cursor::new(data);
+        self.store_stream(cursor).await
+    }
+
+    /// Open a blob for reading.
+    pub async fn open(&self, hash: &ContentHash) -> Result<fs::File> {
+        let path = self.path(hash);
+        if !path.exists() {
+            return Err(PinakesError::BlobNotFound(hash.0.clone()));
+        }
+
+        if self.verify_on_read {
+            self.verify(hash).await?;
+        }
+
+        fs::File::open(&path).await.map_err(|e| PinakesError::Io(e))
+    }
+
+    /// Read a blob entirely into memory.
+    pub async fn read(&self, hash: &ContentHash) -> Result<Vec<u8>> {
+        let path = self.path(hash);
+        if !path.exists() {
+            return Err(PinakesError::BlobNotFound(hash.0.clone()));
+        }
+
+        let data = fs::read(&path).await?;
+
+        if self.verify_on_read {
+            let computed = blake3::hash(&data);
+            if computed.to_hex().to_string() != hash.0 {
+                return Err(PinakesError::StorageIntegrity(format!(
+                    "hash mismatch for blob {}",
+                    hash
+                )));
+            }
+        }
+
+        Ok(data)
+    }
+
+    /// Verify the integrity of a stored blob.
+    pub async fn verify(&self, hash: &ContentHash) -> Result<bool> {
+        let path = self.path(hash);
+        if !path.exists() {
+            return Ok(false);
+        }
+
+        let file = fs::File::open(&path).await?;
+        let mut reader = BufReader::new(file);
+        let mut hasher = blake3::Hasher::new();
+        let mut buf = vec![0u8; 64 * 1024];
+
+        loop {
+            let n = reader.read(&mut buf).await?;
+            if n == 0 {
+                break;
+            }
+            hasher.update(&buf[..n]);
+        }
+
+        let computed = hasher.finalize().to_hex().to_string();
+        if computed != hash.0 {
+            warn!(
+                expected = %hash,
+                computed = %computed,
+                "blob integrity check failed"
+            );
+            return Err(PinakesError::StorageIntegrity(format!(
+                "hash mismatch: expected {}, computed {}",
+                hash, computed
+            )));
+        }
+
+        debug!(hash = %hash, "blob integrity verified");
+        Ok(true)
+    }
+
+    /// Delete a blob from storage.
+    pub async fn delete(&self, hash: &ContentHash) -> Result<()> {
+        let path = self.path(hash);
+        if path.exists() {
+            fs::remove_file(&path).await?;
+            info!(hash = %hash, "deleted blob");
+
+            // Try to remove empty parent directories
+            if let Some(parent) = path.parent() {
+                let _ = fs::remove_dir(parent).await;
+                if let Some(grandparent) = parent.parent() {
+                    let _ = fs::remove_dir(grandparent).await;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Get the size of a stored blob.
+    pub async fn size(&self, hash: &ContentHash) -> Result<u64> {
+        let path = self.path(hash);
+        if !path.exists() {
+            return Err(PinakesError::BlobNotFound(hash.0.clone()));
+        }
+        let meta = fs::metadata(&path).await?;
+        Ok(meta.len())
+    }
+
+    /// List all blob hashes in storage.
+    pub async fn list_all(&self) -> Result<Vec<ContentHash>> {
+        let mut hashes = Vec::new();
+
+        let mut entries = fs::read_dir(&self.root_dir).await?;
+        while let Some(entry) = entries.next_entry().await? {
+            let path = entry.path();
+            if path.is_dir() && path.file_name().map(|n| n.len()) == Some(2) {
+                let mut sub_entries = fs::read_dir(&path).await?;
+                while let Some(sub_entry) = sub_entries.next_entry().await?
{
+                    let sub_path = sub_entry.path();
+                    if sub_path.is_dir() && sub_path.file_name().map(|n| n.len()) == Some(2) {
+                        let mut file_entries = fs::read_dir(&sub_path).await?;
+                        while let Some(file_entry) = file_entries.next_entry().await? {
+                            let file_path = file_entry.path();
+                            if file_path.is_file() {
+                                if let Some(name) = file_path.file_name() {
+                                    hashes
+                                        .push(ContentHash::new(name.to_string_lossy().to_string()));
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        Ok(hashes)
+    }
+
+    /// Calculate total storage used by all blobs.
+    pub async fn total_size(&self) -> Result<u64> {
+        let hashes = self.list_all().await?;
+        let mut total = 0u64;
+        for hash in hashes {
+            if let Ok(size) = self.size(&hash).await {
+                total += size;
+            }
+        }
+        Ok(total)
+    }
+
+    /// Clean up any orphaned temp files.
+    pub async fn cleanup_temp(&self) -> Result<u64> {
+        let temp_dir = self.root_dir.join("temp");
+        if !temp_dir.exists() {
+            return Ok(0);
+        }
+
+        let mut count = 0u64;
+        let mut entries = fs::read_dir(&temp_dir).await?;
+        while let Some(entry) = entries.next_entry().await?
{ + let path = entry.path(); + if path.is_file() { + // Check if temp file is old (> 1 hour) + if let Ok(meta) = fs::metadata(&path).await { + if let Ok(modified) = meta.modified() { + let age = std::time::SystemTime::now() + .duration_since(modified) + .unwrap_or_default(); + if age.as_secs() > 3600 { + let _ = fs::remove_file(&path).await; + count += 1; + } + } + } + } + } + + if count > 0 { + info!(count, "cleaned up orphaned temp files"); + } + Ok(count) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::tempdir; + + #[tokio::test] + async fn test_store_and_retrieve() { + let dir = tempdir().unwrap(); + let service = ManagedStorageService::new(dir.path().to_path_buf(), 1024 * 1024, false); + service.init().await.unwrap(); + + let data = b"hello, world!"; + let (hash, size) = service.store_bytes(data).await.unwrap(); + + assert_eq!(size, data.len() as u64); + assert!(service.exists(&hash).await); + + let retrieved = service.read(&hash).await.unwrap(); + assert_eq!(retrieved, data); + } + + #[tokio::test] + async fn test_deduplication() { + let dir = tempdir().unwrap(); + let service = ManagedStorageService::new(dir.path().to_path_buf(), 1024 * 1024, false); + service.init().await.unwrap(); + + let data = b"duplicate content"; + let (hash1, _) = service.store_bytes(data).await.unwrap(); + let (hash2, _) = service.store_bytes(data).await.unwrap(); + + assert_eq!(hash1.0, hash2.0); + assert_eq!(service.list_all().await.unwrap().len(), 1); + } + + #[tokio::test] + async fn test_verify_integrity() { + let dir = tempdir().unwrap(); + let service = ManagedStorageService::new(dir.path().to_path_buf(), 1024 * 1024, true); + service.init().await.unwrap(); + + let data = b"verify me"; + let (hash, _) = service.store_bytes(data).await.unwrap(); + + assert!(service.verify(&hash).await.unwrap()); + } + + #[tokio::test] + async fn test_upload_too_large() { + let dir = tempdir().unwrap(); + let service = ManagedStorageService::new(dir.path().to_path_buf(), 100, 
false); + service.init().await.unwrap(); + + let data = vec![0u8; 200]; + let result = service.store_bytes(&data).await; + + assert!(matches!(result, Err(PinakesError::UploadTooLarge(_)))); + } + + #[tokio::test] + async fn test_delete() { + let dir = tempdir().unwrap(); + let service = ManagedStorageService::new(dir.path().to_path_buf(), 1024 * 1024, false); + service.init().await.unwrap(); + + let data = b"delete me"; + let (hash, _) = service.store_bytes(data).await.unwrap(); + assert!(service.exists(&hash).await); + + service.delete(&hash).await.unwrap(); + assert!(!service.exists(&hash).await); + } +} diff --git a/crates/pinakes-core/src/model.rs b/crates/pinakes-core/src/model.rs index a1b3023..07bcf43 100644 --- a/crates/pinakes-core/src/model.rs +++ b/crates/pinakes-core/src/model.rs @@ -44,6 +44,71 @@ impl fmt::Display for ContentHash { } } +// ===== Managed Storage Types ===== + +/// Storage mode for media items +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum StorageMode { + /// File exists on disk, referenced by path + #[default] + External, + /// File is stored in managed content-addressable storage + Managed, +} + +impl fmt::Display for StorageMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::External => write!(f, "external"), + Self::Managed => write!(f, "managed"), + } + } +} + +impl std::str::FromStr for StorageMode { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "external" => Ok(Self::External), + "managed" => Ok(Self::Managed), + _ => Err(format!("unknown storage mode: {}", s)), + } + } +} + +/// A blob stored in managed storage (content-addressable) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedBlob { + pub content_hash: ContentHash, + pub file_size: u64, + pub mime_type: String, + pub reference_count: u32, + pub stored_at: DateTime, + pub last_verified: 
Option<DateTime<Utc>>,
+}
+
+/// Result of uploading a file to managed storage
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UploadResult {
+    pub media_id: MediaId,
+    pub content_hash: ContentHash,
+    pub was_duplicate: bool,
+    pub file_size: u64,
+}
+
+/// Statistics about managed storage
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct ManagedStorageStats {
+    pub total_blobs: u64,
+    pub total_size_bytes: u64,
+    pub unique_size_bytes: u64,
+    pub deduplication_ratio: f64,
+    pub managed_media_count: u64,
+    pub orphaned_blobs: u64,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct MediaItem {
     pub id: MediaId,
@@ -73,6 +138,17 @@ pub struct MediaItem {
     pub rating: Option,
     pub perceptual_hash: Option,
 
+    // Managed storage fields
+    /// How the file is stored (external on disk or managed in content-addressable storage)
+    #[serde(default)]
+    pub storage_mode: StorageMode,
+    /// Original filename for uploaded files (preserved separately from file_name)
+    pub original_filename: Option<String>,
+    /// When the file was uploaded to managed storage
+    pub uploaded_at: Option<DateTime<Utc>>,
+    /// Storage key for looking up the blob (usually same as content_hash)
+    pub storage_key: Option<String>,
+
     pub created_at: DateTime,
     pub updated_at: DateTime,
 }
diff --git a/crates/pinakes-core/src/sharing.rs b/crates/pinakes-core/src/sharing.rs
new file mode 100644
index 0000000..cdb67b7
--- /dev/null
+++ b/crates/pinakes-core/src/sharing.rs
@@ -0,0 +1,434 @@
+//! Enhanced sharing system.
+//!
+//! Provides comprehensive sharing capabilities:
+//! - Public link sharing with optional password/expiry
+//! - User-to-user sharing with granular permissions
+//! - Collection/tag sharing with inheritance
+//! - Activity logging and notifications
+
+use std::fmt;
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use uuid::Uuid;
+
+use crate::model::MediaId;
+use crate::users::UserId;
+
+/// Unique identifier for a share.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct ShareId(pub Uuid); + +impl ShareId { + pub fn new() -> Self { + Self(Uuid::now_v7()) + } +} + +impl Default for ShareId { + fn default() -> Self { + Self::new() + } +} + +impl fmt::Display for ShareId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// What is being shared. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ShareTarget { + Media { media_id: MediaId }, + Collection { collection_id: Uuid }, + Tag { tag_id: Uuid }, + SavedSearch { search_id: Uuid }, +} + +impl ShareTarget { + pub fn target_type(&self) -> &'static str { + match self { + Self::Media { .. } => "media", + Self::Collection { .. } => "collection", + Self::Tag { .. } => "tag", + Self::SavedSearch { .. } => "saved_search", + } + } + + pub fn target_id(&self) -> Uuid { + match self { + Self::Media { media_id } => media_id.0, + Self::Collection { collection_id } => *collection_id, + Self::Tag { tag_id } => *tag_id, + Self::SavedSearch { search_id } => *search_id, + } + } +} + +/// Who the share is with. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ShareRecipient { + /// Public link accessible to anyone with the token + PublicLink { + token: String, + password_hash: Option, + }, + /// Shared with a specific user + User { user_id: UserId }, + /// Shared with a group + Group { group_id: Uuid }, + /// Shared with a federated user on another server + Federated { + user_handle: String, + server_url: String, + }, +} + +impl ShareRecipient { + pub fn recipient_type(&self) -> &'static str { + match self { + Self::PublicLink { .. } => "public_link", + Self::User { .. } => "user", + Self::Group { .. } => "group", + Self::Federated { .. } => "federated", + } + } +} + +/// Permissions granted by a share. 
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct SharePermissions { + /// Can view the content + pub can_view: bool, + /// Can download the content + pub can_download: bool, + /// Can edit the content/metadata + pub can_edit: bool, + /// Can delete the content + pub can_delete: bool, + /// Can reshare with others + pub can_reshare: bool, + /// Can add new items (for collections) + pub can_add: bool, +} + +impl SharePermissions { + /// View-only permissions + pub fn view_only() -> Self { + Self { + can_view: true, + ..Default::default() + } + } + + /// Download permissions (includes view) + pub fn download() -> Self { + Self { + can_view: true, + can_download: true, + ..Default::default() + } + } + + /// Edit permissions (includes view and download) + pub fn edit() -> Self { + Self { + can_view: true, + can_download: true, + can_edit: true, + can_add: true, + ..Default::default() + } + } + + /// Full permissions + pub fn full() -> Self { + Self { + can_view: true, + can_download: true, + can_edit: true, + can_delete: true, + can_reshare: true, + can_add: true, + } + } + + /// Merge permissions (takes the most permissive of each) + pub fn merge(&self, other: &Self) -> Self { + Self { + can_view: self.can_view || other.can_view, + can_download: self.can_download || other.can_download, + can_edit: self.can_edit || other.can_edit, + can_delete: self.can_delete || other.can_delete, + can_reshare: self.can_reshare || other.can_reshare, + can_add: self.can_add || other.can_add, + } + } +} + +/// A share record. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Share { + pub id: ShareId, + pub target: ShareTarget, + pub owner_id: UserId, + pub recipient: ShareRecipient, + pub permissions: SharePermissions, + pub note: Option, + pub expires_at: Option>, + pub access_count: u64, + pub last_accessed: Option>, + /// Whether children (media in collection, etc.) 
inherit this share + pub inherit_to_children: bool, + /// Parent share if this was created via reshare + pub parent_share_id: Option, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl Share { + /// Create a new public link share. + pub fn new_public_link( + owner_id: UserId, + target: ShareTarget, + token: String, + permissions: SharePermissions, + ) -> Self { + let now = Utc::now(); + Self { + id: ShareId::new(), + target, + owner_id, + recipient: ShareRecipient::PublicLink { + token, + password_hash: None, + }, + permissions, + note: None, + expires_at: None, + access_count: 0, + last_accessed: None, + inherit_to_children: true, + parent_share_id: None, + created_at: now, + updated_at: now, + } + } + + /// Create a new user share. + pub fn new_user_share( + owner_id: UserId, + target: ShareTarget, + recipient_user_id: UserId, + permissions: SharePermissions, + ) -> Self { + let now = Utc::now(); + Self { + id: ShareId::new(), + target, + owner_id, + recipient: ShareRecipient::User { + user_id: recipient_user_id, + }, + permissions, + note: None, + expires_at: None, + access_count: 0, + last_accessed: None, + inherit_to_children: true, + parent_share_id: None, + created_at: now, + updated_at: now, + } + } + + /// Check if the share has expired. + pub fn is_expired(&self) -> bool { + self.expires_at.map(|exp| exp < Utc::now()).unwrap_or(false) + } + + /// Check if this is a public link share. + pub fn is_public(&self) -> bool { + matches!(self.recipient, ShareRecipient::PublicLink { .. }) + } + + /// Get the public token if this is a public link share. + pub fn public_token(&self) -> Option<&str> { + match &self.recipient { + ShareRecipient::PublicLink { token, .. } => Some(token), + _ => None, + } + } +} + +/// Types of share activity actions. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ShareActivityAction { + Created, + Updated, + Accessed, + Downloaded, + Revoked, + Expired, + PasswordFailed, +} + +impl fmt::Display for ShareActivityAction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Created => write!(f, "created"), + Self::Updated => write!(f, "updated"), + Self::Accessed => write!(f, "accessed"), + Self::Downloaded => write!(f, "downloaded"), + Self::Revoked => write!(f, "revoked"), + Self::Expired => write!(f, "expired"), + Self::PasswordFailed => write!(f, "password_failed"), + } + } +} + +impl std::str::FromStr for ShareActivityAction { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "created" => Ok(Self::Created), + "updated" => Ok(Self::Updated), + "accessed" => Ok(Self::Accessed), + "downloaded" => Ok(Self::Downloaded), + "revoked" => Ok(Self::Revoked), + "expired" => Ok(Self::Expired), + "password_failed" => Ok(Self::PasswordFailed), + _ => Err(format!("unknown share activity action: {}", s)), + } + } +} + +/// Activity log entry for a share. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ShareActivity { + pub id: Uuid, + pub share_id: ShareId, + pub actor_id: Option, + pub actor_ip: Option, + pub action: ShareActivityAction, + pub details: Option, + pub timestamp: DateTime, +} + +impl ShareActivity { + pub fn new(share_id: ShareId, action: ShareActivityAction) -> Self { + Self { + id: Uuid::now_v7(), + share_id, + actor_id: None, + actor_ip: None, + action, + details: None, + timestamp: Utc::now(), + } + } + + pub fn with_actor(mut self, actor_id: UserId) -> Self { + self.actor_id = Some(actor_id); + self + } + + pub fn with_ip(mut self, ip: &str) -> Self { + self.actor_ip = Some(ip.to_string()); + self + } + + pub fn with_details(mut self, details: &str) -> Self { + self.details = Some(details.to_string()); + self + } +} + +/// Types of share notifications. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ShareNotificationType { + NewShare, + ShareUpdated, + ShareRevoked, + ShareExpiring, + ShareAccessed, +} + +impl fmt::Display for ShareNotificationType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::NewShare => write!(f, "new_share"), + Self::ShareUpdated => write!(f, "share_updated"), + Self::ShareRevoked => write!(f, "share_revoked"), + Self::ShareExpiring => write!(f, "share_expiring"), + Self::ShareAccessed => write!(f, "share_accessed"), + } + } +} + +impl std::str::FromStr for ShareNotificationType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "new_share" => Ok(Self::NewShare), + "share_updated" => Ok(Self::ShareUpdated), + "share_revoked" => Ok(Self::ShareRevoked), + "share_expiring" => Ok(Self::ShareExpiring), + "share_accessed" => Ok(Self::ShareAccessed), + _ => Err(format!("unknown share notification type: {}", s)), + } + } +} + +/// A notification about a share. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ShareNotification { + pub id: Uuid, + pub user_id: UserId, + pub share_id: ShareId, + pub notification_type: ShareNotificationType, + pub is_read: bool, + pub created_at: DateTime, +} + +impl ShareNotification { + pub fn new( + user_id: UserId, + share_id: ShareId, + notification_type: ShareNotificationType, + ) -> Self { + Self { + id: Uuid::now_v7(), + user_id, + share_id, + notification_type, + is_read: false, + created_at: Utc::now(), + } + } +} + +/// Generate a random share token using UUID. +pub fn generate_share_token() -> String { + // Use UUIDv4 for random tokens - simple string representation + Uuid::new_v4().simple().to_string() +} + +/// Hash a share password. +pub fn hash_share_password(password: &str) -> String { + // Use BLAKE3 for password hashing (in production, use Argon2) + blake3::hash(password.as_bytes()).to_hex().to_string() +} + +/// Verify a share password. +pub fn verify_share_password(password: &str, hash: &str) -> bool { + let computed = hash_share_password(password); + computed == hash +} diff --git a/crates/pinakes-core/src/storage/mod.rs b/crates/pinakes-core/src/storage/mod.rs index 354ef26..12ee43a 100644 --- a/crates/pinakes-core/src/storage/mod.rs +++ b/crates/pinakes-core/src/storage/mod.rs @@ -511,6 +511,236 @@ pub trait StorageBackend: Send + Sync + 'static { language: Option<&str>, pagination: &Pagination, ) -> Result>; + + // ===== Managed Storage ===== + + /// Insert a media item that uses managed storage + async fn insert_managed_media(&self, item: &MediaItem) -> Result<()>; + + /// Get or create a managed blob record (for deduplication tracking) + async fn get_or_create_blob( + &self, + hash: &ContentHash, + size: u64, + mime_type: &str, + ) -> Result; + + /// Get a managed blob by its content hash + async fn get_blob(&self, hash: &ContentHash) -> Result>; + + /// Increment the reference count for a blob + async fn increment_blob_ref(&self, hash: &ContentHash) -> 
Result<()>; + + /// Decrement the reference count for a blob. Returns true if blob should be deleted. + async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result; + + /// Update the last_verified timestamp for a blob + async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()>; + + /// List orphaned blobs (reference_count = 0) + async fn list_orphaned_blobs(&self) -> Result>; + + /// Delete a blob record + async fn delete_blob(&self, hash: &ContentHash) -> Result<()>; + + /// Get managed storage statistics + async fn managed_storage_stats(&self) -> Result; + + // ===== Sync Devices ===== + + /// Register a new sync device + async fn register_device( + &self, + device: &crate::sync::SyncDevice, + token_hash: &str, + ) -> Result; + + /// Get a sync device by ID + async fn get_device(&self, id: crate::sync::DeviceId) -> Result; + + /// Get a sync device by its token hash + async fn get_device_by_token( + &self, + token_hash: &str, + ) -> Result>; + + /// List all devices for a user + async fn list_user_devices(&self, user_id: UserId) -> Result>; + + /// Update a sync device + async fn update_device(&self, device: &crate::sync::SyncDevice) -> Result<()>; + + /// Delete a sync device + async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()>; + + /// Update the last_seen_at timestamp for a device + async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()>; + + // ===== Sync Log ===== + + /// Record a change in the sync log + async fn record_sync_change(&self, change: &crate::sync::SyncLogEntry) -> Result<()>; + + /// Get changes since a cursor position + async fn get_changes_since( + &self, + cursor: i64, + limit: u64, + ) -> Result>; + + /// Get the current sync cursor (highest sequence number) + async fn get_current_sync_cursor(&self) -> Result; + + /// Clean up old sync log entries + async fn cleanup_old_sync_log(&self, before: DateTime) -> Result; + + // ===== Device Sync State ===== + + /// Get sync state for a device 
and path + async fn get_device_sync_state( + &self, + device_id: crate::sync::DeviceId, + path: &str, + ) -> Result>; + + /// Insert or update device sync state + async fn upsert_device_sync_state(&self, state: &crate::sync::DeviceSyncState) -> Result<()>; + + /// List all pending sync items for a device + async fn list_pending_sync( + &self, + device_id: crate::sync::DeviceId, + ) -> Result>; + + // ===== Upload Sessions (Chunked Uploads) ===== + + /// Create a new upload session + async fn create_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()>; + + /// Get an upload session by ID + async fn get_upload_session(&self, id: Uuid) -> Result; + + /// Update an upload session + async fn update_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()>; + + /// Record a received chunk + async fn record_chunk(&self, upload_id: Uuid, chunk: &crate::sync::ChunkInfo) -> Result<()>; + + /// Get all chunks for an upload + async fn get_upload_chunks(&self, upload_id: Uuid) -> Result>; + + /// Clean up expired upload sessions + async fn cleanup_expired_uploads(&self) -> Result; + + // ===== Sync Conflicts ===== + + /// Record a sync conflict + async fn record_conflict(&self, conflict: &crate::sync::SyncConflict) -> Result<()>; + + /// Get unresolved conflicts for a device + async fn get_unresolved_conflicts( + &self, + device_id: crate::sync::DeviceId, + ) -> Result>; + + /// Resolve a conflict + async fn resolve_conflict( + &self, + id: Uuid, + resolution: crate::config::ConflictResolution, + ) -> Result<()>; + + // ===== Enhanced Sharing ===== + + /// Create a new share + async fn create_share(&self, share: &crate::sharing::Share) -> Result; + + /// Get a share by ID + async fn get_share(&self, id: crate::sharing::ShareId) -> Result; + + /// Get a share by its public token + async fn get_share_by_token(&self, token: &str) -> Result; + + /// List shares created by a user + async fn list_shares_by_owner( + &self, + owner_id: UserId, + 
pagination: &Pagination, + ) -> Result>; + + /// List shares received by a user + async fn list_shares_for_user( + &self, + user_id: UserId, + pagination: &Pagination, + ) -> Result>; + + /// List all shares for a specific target + async fn list_shares_for_target( + &self, + target: &crate::sharing::ShareTarget, + ) -> Result>; + + /// Update a share + async fn update_share(&self, share: &crate::sharing::Share) -> Result; + + /// Delete a share + async fn delete_share(&self, id: crate::sharing::ShareId) -> Result<()>; + + /// Record that a share was accessed + async fn record_share_access(&self, id: crate::sharing::ShareId) -> Result<()>; + + /// Check share access for a user and target + async fn check_share_access( + &self, + user_id: Option, + target: &crate::sharing::ShareTarget, + ) -> Result>; + + /// Get effective permissions for a media item (considering inheritance) + async fn get_effective_share_permissions( + &self, + user_id: Option, + media_id: MediaId, + ) -> Result>; + + /// Batch delete shares + async fn batch_delete_shares(&self, ids: &[crate::sharing::ShareId]) -> Result; + + /// Clean up expired shares + async fn cleanup_expired_shares(&self) -> Result; + + // ===== Share Activity ===== + + /// Record share activity + async fn record_share_activity(&self, activity: &crate::sharing::ShareActivity) -> Result<()>; + + /// Get activity for a share + async fn get_share_activity( + &self, + share_id: crate::sharing::ShareId, + pagination: &Pagination, + ) -> Result>; + + // ===== Share Notifications ===== + + /// Create a share notification + async fn create_share_notification( + &self, + notification: &crate::sharing::ShareNotification, + ) -> Result<()>; + + /// Get unread notifications for a user + async fn get_unread_notifications( + &self, + user_id: UserId, + ) -> Result>; + + /// Mark a notification as read + async fn mark_notification_read(&self, id: Uuid) -> Result<()>; + + /// Mark all notifications as read for a user + async fn 
mark_all_notifications_read(&self, user_id: UserId) -> Result<()>; } /// Comprehensive library statistics. diff --git a/crates/pinakes-core/src/storage/postgres.rs b/crates/pinakes-core/src/storage/postgres.rs index caeea62..ef4b003 100644 --- a/crates/pinakes-core/src/storage/postgres.rs +++ b/crates/pinakes-core/src/storage/postgres.rs @@ -147,9 +147,18 @@ fn custom_field_type_from_string(s: &str) -> Result { .map_err(|_| PinakesError::Database(format!("unknown custom field type: {s}"))) } +fn storage_mode_from_string(s: &str) -> StorageMode { + match s { + "managed" => StorageMode::Managed, + _ => StorageMode::External, + } +} + fn row_to_media_item(row: &Row) -> Result { let media_type_str: String = row.get("media_type"); let media_type = media_type_from_string(&media_type_str)?; + let storage_mode_str: String = row.get("storage_mode"); + let storage_mode = storage_mode_from_string(&storage_mode_str); Ok(MediaItem { id: MediaId(row.get("id")), @@ -180,6 +189,12 @@ fn row_to_media_item(row: &Row) -> Result { rating: row.get("rating"), perceptual_hash: row.get("perceptual_hash"), + // Managed storage fields + storage_mode, + original_filename: row.get("original_filename"), + uploaded_at: row.get("uploaded_at"), + storage_key: row.get("storage_key"), + created_at: row.get("created_at"), updated_at: row.get("updated_at"), }) @@ -3945,12 +3960,67 @@ impl StorageBackend for PostgresBackend { async fn get_reading_list( &self, - _user_id: uuid::Uuid, - _status: Option, + user_id: uuid::Uuid, + status: Option, ) -> Result> { - // TODO: Implement reading list with explicit status tracking - // For now, return empty list as this requires additional schema - Ok(Vec::new()) + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; + + // Query books with reading progress for this user + let rows = client + .query( + "SELECT m.*, wh.progress_secs, bm.page_count + FROM media_items m + INNER JOIN watch_history wh ON m.id 
= wh.media_id + LEFT JOIN book_metadata bm ON m.id = bm.media_id + WHERE wh.user_id = $1 + ORDER BY wh.last_watched_at DESC", + &[&user_id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + let mut results = Vec::new(); + for row in rows { + // Parse the media item + let item = row_to_media_item(&row)?; + + // Get progress info + let current_page: f64 = row.get("progress_secs"); + let current_page = current_page as i32; + let total_pages: Option = row.get("page_count"); + + // Calculate status based on progress + let calculated_status = if let Some(total) = total_pages { + if total > 0 { + let percent = (current_page as f64 / total as f64 * 100.0).min(100.0); + if percent >= 100.0 { + crate::model::ReadingStatus::Completed + } else if percent > 0.0 { + crate::model::ReadingStatus::Reading + } else { + crate::model::ReadingStatus::ToRead + } + } else { + crate::model::ReadingStatus::Reading + } + } else { + // No total pages known, assume reading + crate::model::ReadingStatus::Reading + }; + + // Filter by status if specified + match status { + None => results.push(item), + Some(s) if s == calculated_status => results.push(item), + _ => {} + } + } + + Ok(results) } #[allow(clippy::too_many_arguments)] @@ -4055,6 +4125,1608 @@ impl StorageBackend for PostgresBackend { let items: Result> = rows.iter().map(row_to_media_item).collect(); items } + + // ========================================================================= + // Managed Storage + // ========================================================================= + + async fn insert_managed_media(&self, item: &MediaItem) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO media_items (id, path, file_name, media_type, content_hash, file_size, + title, artist, album, genre, year, duration_secs, description, thumbnail_path, + storage_mode, original_filename, 
uploaded_at, storage_key, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)", + &[ + &item.id.0, + &item.path.to_string_lossy().to_string(), + &item.file_name, + &media_type_to_string(&item.media_type), + &item.content_hash.0, + &(item.file_size as i64), + &item.title, + &item.artist, + &item.album, + &item.genre, + &item.year, + &item.duration_secs, + &item.description, + &item.thumbnail_path.as_ref().map(|p| p.to_string_lossy().to_string()), + &item.storage_mode.to_string(), + &item.original_filename, + &item.uploaded_at, + &item.storage_key, + &item.created_at, + &item.updated_at, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_or_create_blob( + &self, + hash: &ContentHash, + size: u64, + mime_type: &str, + ) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + // Try to get existing blob + let existing = client + .query_opt( + "SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified + FROM managed_blobs WHERE content_hash = $1", + &[&hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + if let Some(row) = existing { + return Ok(ManagedBlob { + content_hash: ContentHash(row.get(0)), + file_size: row.get::<_, i64>(1) as u64, + mime_type: row.get(2), + reference_count: row.get::<_, i32>(3) as u32, + stored_at: row.get(4), + last_verified: row.get(5), + }); + } + + // Create new blob + let now = chrono::Utc::now(); + client + .execute( + "INSERT INTO managed_blobs (content_hash, file_size, mime_type, reference_count, stored_at) + VALUES ($1, $2, $3, 1, $4)", + &[&hash.0, &(size as i64), &mime_type, &now], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(ManagedBlob { + content_hash: hash.clone(), + file_size: size, + mime_type: mime_type.to_string(), + 
reference_count: 1, + stored_at: now, + last_verified: None, + }) + } + + async fn get_blob(&self, hash: &ContentHash) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let row = client + .query_opt( + "SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified + FROM managed_blobs WHERE content_hash = $1", + &[&hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(row.map(|r| ManagedBlob { + content_hash: ContentHash(r.get(0)), + file_size: r.get::<_, i64>(1) as u64, + mime_type: r.get(2), + reference_count: r.get::<_, i32>(3) as u32, + stored_at: r.get(4), + last_verified: r.get(5), + })) + } + + async fn increment_blob_ref(&self, hash: &ContentHash) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE managed_blobs SET reference_count = reference_count + 1 WHERE content_hash = $1", + &[&hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE managed_blobs SET reference_count = reference_count - 1 WHERE content_hash = $1", + &[&hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + // Check if reference count is now 0 + let row = client + .query_opt( + "SELECT reference_count FROM managed_blobs WHERE content_hash = $1", + &[&hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + let count: i32 = row.map(|r| r.get(0)).unwrap_or(0); + Ok(count <= 0) + } + + async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()> { + let client = self + .pool + .get() + .await 
+ .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let now = chrono::Utc::now(); + client + .execute( + "UPDATE managed_blobs SET last_verified = $1 WHERE content_hash = $2", + &[&now, &hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn list_orphaned_blobs(&self) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified + FROM managed_blobs WHERE reference_count <= 0", + &[], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| ManagedBlob { + content_hash: ContentHash(r.get(0)), + file_size: r.get::<_, i64>(1) as u64, + mime_type: r.get(2), + reference_count: r.get::<_, i32>(3) as u32, + stored_at: r.get(4), + last_verified: r.get(5), + }) + .collect()) + } + + async fn delete_blob(&self, hash: &ContentHash) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "DELETE FROM managed_blobs WHERE content_hash = $1", + &[&hash.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn managed_storage_stats(&self) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let total_blobs: i64 = client + .query_one("SELECT COUNT(*) FROM managed_blobs", &[]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .get(0); + + let total_size: i64 = client + .query_one("SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs", &[]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .get(0); + + let unique_size: i64 = client + .query_one( + "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE reference_count = 1", + &[], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .get(0); + + let managed_media_count: i64 = client + .query_one( + "SELECT COUNT(*) FROM media_items WHERE storage_mode = 'managed'", + &[], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .get(0); + + let orphaned_blobs: i64 = client + .query_one( + "SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0", + &[], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .get(0); + + let dedup_ratio = if total_size > 0 { + unique_size as f64 / total_size as f64 + } else { + 1.0 + }; + + Ok(ManagedStorageStats { + total_blobs: total_blobs as u64, + total_size_bytes: total_size as u64, + unique_size_bytes: unique_size as u64, + deduplication_ratio: dedup_ratio, + managed_media_count: managed_media_count as u64, + orphaned_blobs: orphaned_blobs as u64, + }) + } + + // ========================================================================= + // Sync Devices + // ========================================================================= + + async fn register_device( + &self, + device: &crate::sync::SyncDevice, + token_hash: &str, + ) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO sync_devices (id, user_id, name, device_type, client_version, os_info, + device_token_hash, last_seen_at, sync_cursor, enabled, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)", + &[ + &device.id.0, + &device.user_id.0, + &device.name, + &device.device_type.to_string(), + &device.client_version, + &device.os_info, + &token_hash, + &device.last_seen_at, + &device.sync_cursor, + &device.enabled, + &device.created_at, + &device.updated_at, + ], + ) + .await + .map_err(|e| 
PinakesError::Database(e.to_string()))?; + + Ok(device.clone()) + } + + async fn get_device(&self, id: crate::sync::DeviceId) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let row = client + .query_one( + "SELECT id, user_id, name, device_type, client_version, os_info, + last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at + FROM sync_devices WHERE id = $1", + &[&id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(crate::sync::SyncDevice { + id: crate::sync::DeviceId(row.get(0)), + user_id: crate::users::UserId(row.get(1)), + name: row.get(2), + device_type: row.get::<_, String>(3).parse().unwrap_or_default(), + client_version: row.get(4), + os_info: row.get(5), + last_sync_at: row.get(6), + last_seen_at: row.get(7), + sync_cursor: row.get(8), + enabled: row.get(9), + created_at: row.get(10), + updated_at: row.get(11), + }) + } + + async fn get_device_by_token( + &self, + token_hash: &str, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let row = client + .query_opt( + "SELECT id, user_id, name, device_type, client_version, os_info, + last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at + FROM sync_devices WHERE device_token_hash = $1", + &[&token_hash], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(row.map(|r| crate::sync::SyncDevice { + id: crate::sync::DeviceId(r.get(0)), + user_id: crate::users::UserId(r.get(1)), + name: r.get(2), + device_type: r.get::<_, String>(3).parse().unwrap_or_default(), + client_version: r.get(4), + os_info: r.get(5), + last_sync_at: r.get(6), + last_seen_at: r.get(7), + sync_cursor: r.get(8), + enabled: r.get(9), + created_at: r.get(10), + updated_at: r.get(11), + })) + } + + async fn list_user_devices( + &self, + user_id: 
crate::users::UserId, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT id, user_id, name, device_type, client_version, os_info, + last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at + FROM sync_devices WHERE user_id = $1 ORDER BY last_seen_at DESC", + &[&user_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sync::SyncDevice { + id: crate::sync::DeviceId(r.get(0)), + user_id: crate::users::UserId(r.get(1)), + name: r.get(2), + device_type: r.get::<_, String>(3).parse().unwrap_or_default(), + client_version: r.get(4), + os_info: r.get(5), + last_sync_at: r.get(6), + last_seen_at: r.get(7), + sync_cursor: r.get(8), + enabled: r.get(9), + created_at: r.get(10), + updated_at: r.get(11), + }) + .collect()) + } + + async fn update_device(&self, device: &crate::sync::SyncDevice) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE sync_devices SET name = $1, device_type = $2, client_version = $3, + os_info = $4, last_sync_at = $5, last_seen_at = $6, sync_cursor = $7, + enabled = $8, updated_at = $9 WHERE id = $10", + &[ + &device.name, + &device.device_type.to_string(), + &device.client_version, + &device.os_info, + &device.last_sync_at, + &device.last_seen_at, + &device.sync_cursor, + &device.enabled, + &device.updated_at, + &device.id.0, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute("DELETE FROM sync_devices WHERE id = $1", &[&id.0]) + .await + .map_err(|e| 
PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let now = chrono::Utc::now(); + client + .execute( + "UPDATE sync_devices SET last_seen_at = $1, updated_at = $1 WHERE id = $2", + &[&now, &id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + // ========================================================================= + // Sync Log + // ========================================================================= + + async fn record_sync_change(&self, change: &crate::sync::SyncLogEntry) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + // Get and increment sequence + let seq_row = client + .query_one( + "UPDATE sync_sequence SET current_value = current_value + 1 WHERE id = 1 RETURNING current_value", + &[], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + let seq: i64 = seq_row.get(0); + + client + .execute( + "INSERT INTO sync_log (id, sequence, change_type, media_id, path, content_hash, + file_size, metadata_json, changed_by_device, timestamp) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", + &[ + &change.id, + &seq, + &change.change_type.to_string(), + &change.media_id.map(|m| m.0), + &change.path, + &change.content_hash.as_ref().map(|h| h.0.clone()), + &change.file_size.map(|s| s as i64), + &change.metadata_json, + &change.changed_by_device.map(|d| d.0), + &change.timestamp, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_changes_since( + &self, + cursor: i64, + limit: u64, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = 
client + .query( + "SELECT id, sequence, change_type, media_id, path, content_hash, + file_size, metadata_json, changed_by_device, timestamp + FROM sync_log WHERE sequence > $1 ORDER BY sequence LIMIT $2", + &[&cursor, &(limit as i64)], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sync::SyncLogEntry { + id: r.get(0), + sequence: r.get(1), + change_type: r + .get::<_, String>(2) + .parse() + .unwrap_or(crate::sync::SyncChangeType::Modified), + media_id: r.get::<_, Option>(3).map(MediaId), + path: r.get(4), + content_hash: r.get::<_, Option>(5).map(ContentHash), + file_size: r.get::<_, Option>(6).map(|s| s as u64), + metadata_json: r.get(7), + changed_by_device: r.get::<_, Option>(8).map(crate::sync::DeviceId), + timestamp: r.get(9), + }) + .collect()) + } + + async fn get_current_sync_cursor(&self) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let row = client + .query_one("SELECT current_value FROM sync_sequence WHERE id = 1", &[]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(row.get(0)) + } + + async fn cleanup_old_sync_log(&self, before: chrono::DateTime) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let result = client + .execute("DELETE FROM sync_log WHERE timestamp < $1", &[&before]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(result) + } + + // ========================================================================= + // Device Sync State + // ========================================================================= + + async fn get_device_sync_state( + &self, + device_id: crate::sync::DeviceId, + path: &str, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: 
{e}")))?; + + let row = client + .query_opt( + "SELECT device_id, path, local_hash, server_hash, local_mtime, server_mtime, + sync_status, last_synced_at, conflict_info_json + FROM device_sync_state WHERE device_id = $1 AND path = $2", + &[&device_id.0, &path], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(row.map(|r| crate::sync::DeviceSyncState { + device_id: crate::sync::DeviceId(r.get(0)), + path: r.get(1), + local_hash: r.get(2), + server_hash: r.get(3), + local_mtime: r.get(4), + server_mtime: r.get(5), + sync_status: r + .get::<_, String>(6) + .parse() + .unwrap_or(crate::sync::FileSyncStatus::Synced), + last_synced_at: r.get(7), + conflict_info_json: r.get(8), + })) + } + + async fn upsert_device_sync_state(&self, state: &crate::sync::DeviceSyncState) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO device_sync_state (device_id, path, local_hash, server_hash, + local_mtime, server_mtime, sync_status, last_synced_at, conflict_info_json) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ON CONFLICT(device_id, path) DO UPDATE SET + local_hash = EXCLUDED.local_hash, + server_hash = EXCLUDED.server_hash, + local_mtime = EXCLUDED.local_mtime, + server_mtime = EXCLUDED.server_mtime, + sync_status = EXCLUDED.sync_status, + last_synced_at = EXCLUDED.last_synced_at, + conflict_info_json = EXCLUDED.conflict_info_json", + &[ + &state.device_id.0, + &state.path, + &state.local_hash, + &state.server_hash, + &state.local_mtime, + &state.server_mtime, + &state.sync_status.to_string(), + &state.last_synced_at, + &state.conflict_info_json, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn list_pending_sync( + &self, + device_id: crate::sync::DeviceId, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| 
PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT device_id, path, local_hash, server_hash, local_mtime, server_mtime, + sync_status, last_synced_at, conflict_info_json + FROM device_sync_state + WHERE device_id = $1 AND sync_status IN ('pending_upload', 'pending_download', 'conflict')", + &[&device_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sync::DeviceSyncState { + device_id: crate::sync::DeviceId(r.get(0)), + path: r.get(1), + local_hash: r.get(2), + server_hash: r.get(3), + local_mtime: r.get(4), + server_mtime: r.get(5), + sync_status: r + .get::<_, String>(6) + .parse() + .unwrap_or(crate::sync::FileSyncStatus::Synced), + last_synced_at: r.get(7), + conflict_info_json: r.get(8), + }) + .collect()) + } + + // ========================================================================= + // Upload Sessions + // ========================================================================= + + async fn create_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO upload_sessions (id, device_id, target_path, expected_hash, + expected_size, chunk_size, chunk_count, status, created_at, expires_at, last_activity) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)", + &[ + &session.id, + &session.device_id.0, + &session.target_path, + &session.expected_hash.0, + &(session.expected_size as i64), + &(session.chunk_size as i64), + &(session.chunk_count as i64), + &session.status.to_string(), + &session.created_at, + &session.expires_at, + &session.last_activity, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_upload_session(&self, id: Uuid) -> Result { + let client = self + .pool + .get() + .await + 
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let row = client + .query_one( + "SELECT id, device_id, target_path, expected_hash, expected_size, chunk_size, + chunk_count, status, created_at, expires_at, last_activity + FROM upload_sessions WHERE id = $1", + &[&id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(crate::sync::UploadSession { + id: row.get(0), + device_id: crate::sync::DeviceId(row.get(1)), + target_path: row.get(2), + expected_hash: ContentHash(row.get(3)), + expected_size: row.get::<_, i64>(4) as u64, + chunk_size: row.get::<_, i64>(5) as u64, + chunk_count: row.get::<_, i64>(6) as u64, + status: row + .get::<_, String>(7) + .parse() + .unwrap_or(crate::sync::UploadStatus::Pending), + created_at: row.get(8), + expires_at: row.get(9), + last_activity: row.get(10), + }) + } + + async fn update_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE upload_sessions SET status = $1, last_activity = $2 WHERE id = $3", + &[ + &session.status.to_string(), + &session.last_activity, + &session.id, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn record_chunk(&self, upload_id: Uuid, chunk: &crate::sync::ChunkInfo) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO upload_chunks (upload_id, chunk_index, offset, size, hash, received_at) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT(upload_id, chunk_index) DO UPDATE SET + offset = EXCLUDED.offset, size = EXCLUDED.size, + hash = EXCLUDED.hash, received_at = EXCLUDED.received_at", + &[ + &upload_id, + &(chunk.chunk_index as i64), + &(chunk.offset as i64), + &(chunk.size as i64), + 
&chunk.hash, + &chunk.received_at, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_upload_chunks(&self, upload_id: Uuid) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT upload_id, chunk_index, offset, size, hash, received_at + FROM upload_chunks WHERE upload_id = $1 ORDER BY chunk_index", + &[&upload_id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sync::ChunkInfo { + upload_id: r.get(0), + chunk_index: r.get::<_, i64>(1) as u64, + offset: r.get::<_, i64>(2) as u64, + size: r.get::<_, i64>(3) as u64, + hash: r.get(4), + received_at: r.get(5), + }) + .collect()) + } + + async fn cleanup_expired_uploads(&self) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let now = chrono::Utc::now(); + let result = client + .execute("DELETE FROM upload_sessions WHERE expires_at < $1", &[&now]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(result) + } + + // ========================================================================= + // Sync Conflicts + // ========================================================================= + + async fn record_conflict(&self, conflict: &crate::sync::SyncConflict) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO sync_conflicts (id, device_id, path, local_hash, local_mtime, + server_hash, server_mtime, detected_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8)", + &[ + &conflict.id, + &conflict.device_id.0, + &conflict.path, + &conflict.local_hash, + &conflict.local_mtime, + &conflict.server_hash, + &conflict.server_mtime, + 
&conflict.detected_at, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_unresolved_conflicts( + &self, + device_id: crate::sync::DeviceId, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT id, device_id, path, local_hash, local_mtime, server_hash, server_mtime, + detected_at, resolved_at, resolution + FROM sync_conflicts WHERE device_id = $1 AND resolved_at IS NULL", + &[&device_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sync::SyncConflict { + id: r.get(0), + device_id: crate::sync::DeviceId(r.get(1)), + path: r.get(2), + local_hash: r.get(3), + local_mtime: r.get(4), + server_hash: r.get(5), + server_mtime: r.get(6), + detected_at: r.get(7), + resolved_at: r.get(8), + resolution: r + .get::<_, Option>(9) + .and_then(|s| match s.as_str() { + "server_wins" => Some(crate::config::ConflictResolution::ServerWins), + "client_wins" => Some(crate::config::ConflictResolution::ClientWins), + "keep_both" => Some(crate::config::ConflictResolution::KeepBoth), + "manual" => Some(crate::config::ConflictResolution::Manual), + _ => None, + }), + }) + .collect()) + } + + async fn resolve_conflict( + &self, + id: Uuid, + resolution: crate::config::ConflictResolution, + ) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let now = chrono::Utc::now(); + let resolution_str = match resolution { + crate::config::ConflictResolution::ServerWins => "server_wins", + crate::config::ConflictResolution::ClientWins => "client_wins", + crate::config::ConflictResolution::KeepBoth => "keep_both", + crate::config::ConflictResolution::Manual => "manual", + }; + + client + .execute( + "UPDATE sync_conflicts SET resolved_at = $1, resolution = 
$2 WHERE id = $3", + &[&now, &resolution_str, &id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + // ========================================================================= + // Shares + // ========================================================================= + + async fn create_share(&self, share: &crate::sharing::Share) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let (recipient_type, recipient_user_id, public_token, password_hash): ( + &str, + Option, + Option, + Option, + ) = match &share.recipient { + crate::sharing::ShareRecipient::PublicLink { + token, + password_hash, + } => ( + "public_link", + None, + Some(token.clone()), + password_hash.clone(), + ), + crate::sharing::ShareRecipient::User { user_id } => { + ("user", Some(user_id.0), None, None) + } + crate::sharing::ShareRecipient::Group { .. } => ("group", None, None, None), + crate::sharing::ShareRecipient::Federated { .. 
} => ("federated", None, None, None), + }; + + client + .execute( + "INSERT INTO shares (id, target_type, target_id, owner_id, recipient_type, + recipient_user_id, public_token, public_password_hash, + perm_view, perm_download, perm_edit, perm_delete, perm_reshare, perm_add, + note, expires_at, access_count, inherit_to_children, parent_share_id, + created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)", + &[ + &share.id.0, + &share.target.target_type(), + &share.target.target_id(), + &share.owner_id.0, + &recipient_type, + &recipient_user_id, + &public_token, + &password_hash, + &share.permissions.can_view, + &share.permissions.can_download, + &share.permissions.can_edit, + &share.permissions.can_delete, + &share.permissions.can_reshare, + &share.permissions.can_add, + &share.note, + &share.expires_at, + &(share.access_count as i64), + &share.inherit_to_children, + &share.parent_share_id.map(|s| s.0), + &share.created_at, + &share.updated_at, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(share.clone()) + } + + async fn get_share(&self, id: crate::sharing::ShareId) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let row = client + .query_one( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE id = $1", + &[&id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + pg_row_to_share(&row) + } + + async fn get_share_by_token(&self, token: &str) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: 
{e}")))?; + + let row = client + .query_one( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE public_token = $1", + &[&token], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + pg_row_to_share(&row) + } + + async fn list_shares_by_owner( + &self, + owner_id: crate::users::UserId, + pagination: &Pagination, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE owner_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3", + &[ + &owner_id.0, + &(pagination.limit as i64), + &(pagination.offset as i64), + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + rows.iter().map(pg_row_to_share).collect() + } + + async fn list_shares_for_user( + &self, + user_id: crate::users::UserId, + pagination: &Pagination, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE 
recipient_user_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3", + &[ + &user_id.0, + &(pagination.limit as i64), + &(pagination.offset as i64), + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + rows.iter().map(pg_row_to_share).collect() + } + + async fn list_shares_for_target( + &self, + target: &crate::sharing::ShareTarget, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let target_type = target.target_type(); + let target_id = target.target_id(); + + let rows = client + .query( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE target_type = $1 AND target_id = $2", + &[&target_type, &target_id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + rows.iter().map(pg_row_to_share).collect() + } + + async fn update_share(&self, share: &crate::sharing::Share) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE shares SET + perm_view = $1, perm_download = $2, perm_edit = $3, perm_delete = $4, + perm_reshare = $5, perm_add = $6, note = $7, expires_at = $8, + inherit_to_children = $9, updated_at = $10 + WHERE id = $11", + &[ + &share.permissions.can_view, + &share.permissions.can_download, + &share.permissions.can_edit, + &share.permissions.can_delete, + &share.permissions.can_reshare, + &share.permissions.can_add, + &share.note, + &share.expires_at, + &share.inherit_to_children, + &share.updated_at, + &share.id.0, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(share.clone()) + } + + async fn 
delete_share(&self, id: crate::sharing::ShareId) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute("DELETE FROM shares WHERE id = $1", &[&id.0]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn record_share_access(&self, id: crate::sharing::ShareId) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let now = chrono::Utc::now(); + client + .execute( + "UPDATE shares SET access_count = access_count + 1, last_accessed = $1 WHERE id = $2", + &[&now, &id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn check_share_access( + &self, + user_id: Option, + target: &crate::sharing::ShareTarget, + ) -> Result> { + let shares = self.list_shares_for_target(target).await?; + let now = chrono::Utc::now(); + + for share in shares { + // Skip expired shares + if let Some(exp) = share.expires_at { + if exp < now { + continue; + } + } + + match (&share.recipient, user_id) { + // Public links are accessible to anyone + (crate::sharing::ShareRecipient::PublicLink { .. }, _) => { + return Ok(Some(share.permissions)); + } + // User shares require matching user + ( + crate::sharing::ShareRecipient::User { + user_id: share_user, + }, + Some(uid), + ) if *share_user == uid => { + return Ok(Some(share.permissions)); + } + _ => continue, + } + } + + Ok(None) + } + + async fn get_effective_share_permissions( + &self, + user_id: Option, + media_id: MediaId, + ) -> Result> { + // Check direct media shares + let target = crate::sharing::ShareTarget::Media { media_id }; + if let Some(perms) = self.check_share_access(user_id, &target).await? 
{ + return Ok(Some(perms)); + } + + // Check collection shares (inheritance) + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + // Find collections containing this media + let collection_rows = client + .query( + "SELECT collection_id FROM collection_items WHERE media_id = $1", + &[&media_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + for row in collection_rows { + let collection_id: Uuid = row.get(0); + let target = crate::sharing::ShareTarget::Collection { collection_id }; + if let Some(perms) = self.check_share_access(user_id, &target).await? { + return Ok(Some(perms)); + } + } + + // Check tag shares (inheritance) + let tag_rows = client + .query( + "SELECT tag_id FROM media_tags WHERE media_id = $1", + &[&media_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + for row in tag_rows { + let tag_id: Uuid = row.get(0); + let target = crate::sharing::ShareTarget::Tag { tag_id }; + if let Some(perms) = self.check_share_access(user_id, &target).await? 
{ + return Ok(Some(perms)); + } + } + + Ok(None) + } + + async fn batch_delete_shares(&self, ids: &[crate::sharing::ShareId]) -> Result { + if ids.is_empty() { + return Ok(0); + } + + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let uuid_ids: Vec = ids.iter().map(|id| id.0).collect(); + let result = client + .execute("DELETE FROM shares WHERE id = ANY($1)", &[&uuid_ids]) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(result) + } + + async fn cleanup_expired_shares(&self) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let now = chrono::Utc::now(); + let result = client + .execute( + "DELETE FROM shares WHERE expires_at IS NOT NULL AND expires_at < $1", + &[&now], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(result) + } + + // ========================================================================= + // Share Activity + // ========================================================================= + + async fn record_share_activity(&self, activity: &crate::sharing::ShareActivity) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO share_activity (id, share_id, actor_id, actor_ip, action, details, timestamp) + VALUES ($1, $2, $3, $4, $5, $6, $7)", + &[ + &activity.id, + &activity.share_id.0, + &activity.actor_id.map(|u| u.0), + &activity.actor_ip, + &activity.action.to_string(), + &activity.details, + &activity.timestamp, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_share_activity( + &self, + share_id: crate::sharing::ShareId, + pagination: &Pagination, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| 
PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT id, share_id, actor_id, actor_ip, action, details, timestamp + FROM share_activity WHERE share_id = $1 ORDER BY timestamp DESC LIMIT $2 OFFSET $3", + &[ + &share_id.0, + &(pagination.limit as i64), + &(pagination.offset as i64), + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sharing::ShareActivity { + id: r.get(0), + share_id: crate::sharing::ShareId(r.get(1)), + actor_id: r.get::<_, Option>(2).map(crate::users::UserId), + actor_ip: r.get(3), + action: r + .get::<_, String>(4) + .parse() + .unwrap_or(crate::sharing::ShareActivityAction::Accessed), + details: r.get(5), + timestamp: r.get(6), + }) + .collect()) + } + + // ========================================================================= + // Share Notifications + // ========================================================================= + + async fn create_share_notification( + &self, + notification: &crate::sharing::ShareNotification, + ) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "INSERT INTO share_notifications (id, user_id, share_id, notification_type, is_read, created_at) + VALUES ($1, $2, $3, $4, $5, $6)", + &[ + ¬ification.id, + ¬ification.user_id.0, + ¬ification.share_id.0, + ¬ification.notification_type.to_string(), + ¬ification.is_read, + ¬ification.created_at, + ], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn get_unread_notifications( + &self, + user_id: crate::users::UserId, + ) -> Result> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + let rows = client + .query( + "SELECT id, user_id, share_id, notification_type, is_read, created_at + FROM share_notifications 
WHERE user_id = $1 AND is_read = false ORDER BY created_at DESC", + &[&user_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(rows + .iter() + .map(|r| crate::sharing::ShareNotification { + id: r.get(0), + user_id: crate::users::UserId(r.get(1)), + share_id: crate::sharing::ShareId(r.get(2)), + notification_type: r + .get::<_, String>(3) + .parse() + .unwrap_or(crate::sharing::ShareNotificationType::NewShare), + is_read: r.get(4), + created_at: r.get(5), + }) + .collect()) + } + + async fn mark_notification_read(&self, id: Uuid) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE share_notifications SET is_read = true WHERE id = $1", + &[&id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } + + async fn mark_all_notifications_read(&self, user_id: crate::users::UserId) -> Result<()> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?; + + client + .execute( + "UPDATE share_notifications SET is_read = true WHERE user_id = $1", + &[&user_id.0], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; + + Ok(()) + } } impl PostgresBackend { @@ -4201,6 +5873,74 @@ impl PostgresBackend { } } +/// Helper function to parse a share row from PostgreSQL +fn pg_row_to_share(row: &Row) -> Result { + let id: Uuid = row.get(0); + let target_type: String = row.get(1); + let target_id: Uuid = row.get(2); + let owner_id: Uuid = row.get(3); + let recipient_type: String = row.get(4); + let recipient_user_id: Option = row.get(5); + let public_token: Option = row.get(6); + let password_hash: Option = row.get(7); + + let target = match target_type.as_str() { + "media" => crate::sharing::ShareTarget::Media { + media_id: MediaId(target_id), + }, + "collection" => crate::sharing::ShareTarget::Collection { + 
collection_id: target_id, + }, + "tag" => crate::sharing::ShareTarget::Tag { tag_id: target_id }, + "saved_search" => crate::sharing::ShareTarget::SavedSearch { + search_id: target_id, + }, + _ => crate::sharing::ShareTarget::Media { + media_id: MediaId(target_id), + }, + }; + + let recipient = match recipient_type.as_str() { + "public_link" => crate::sharing::ShareRecipient::PublicLink { + token: public_token.unwrap_or_default(), + password_hash, + }, + "user" => crate::sharing::ShareRecipient::User { + user_id: crate::users::UserId(recipient_user_id.unwrap_or(Uuid::nil())), + }, + "group" => crate::sharing::ShareRecipient::Group { + group_id: Uuid::nil(), + }, + _ => crate::sharing::ShareRecipient::PublicLink { + token: public_token.unwrap_or_default(), + password_hash, + }, + }; + + Ok(crate::sharing::Share { + id: crate::sharing::ShareId(id), + target, + owner_id: crate::users::UserId(owner_id), + recipient, + permissions: crate::sharing::SharePermissions { + can_view: row.get(8), + can_download: row.get(9), + can_edit: row.get(10), + can_delete: row.get(11), + can_reshare: row.get(12), + can_add: row.get(13), + }, + note: row.get(14), + expires_at: row.get(15), + access_count: row.get::<_, i64>(16) as u64, + last_accessed: row.get(17), + inherit_to_children: row.get(18), + parent_share_id: row.get::<_, Option>(19).map(crate::sharing::ShareId), + created_at: row.get(20), + updated_at: row.get(21), + }) +} + /// Check if a SearchQuery tree contains any FullText or Prefix node (i.e. uses the FTS index). 
fn query_has_fts(query: &SearchQuery) -> bool { match query { diff --git a/crates/pinakes-core/src/storage/sqlite.rs b/crates/pinakes-core/src/storage/sqlite.rs index 57b7285..995ecc7 100644 --- a/crates/pinakes-core/src/storage/sqlite.rs +++ b/crates/pinakes-core/src/storage/sqlite.rs @@ -131,6 +131,25 @@ fn row_to_media_item(row: &Row) -> rusqlite::Result { .ok() .flatten(), + // Managed storage fields (may not be present in all queries) + storage_mode: row + .get::<_, Option>("storage_mode") + .ok() + .flatten() + .and_then(|s| s.parse().ok()) + .unwrap_or_default(), + original_filename: row + .get::<_, Option>("original_filename") + .ok() + .flatten(), + uploaded_at: row + .get::<_, Option>("uploaded_at") + .ok() + .flatten() + .and_then(|s| DateTime::parse_from_rfc3339(&s).ok()) + .map(|dt| dt.with_timezone(&Utc)), + storage_key: row.get::<_, Option>("storage_key").ok().flatten(), + created_at: parse_datetime(&created_str), updated_at: parse_datetime(&updated_str), }) @@ -4349,12 +4368,79 @@ impl StorageBackend for SqliteBackend { async fn get_reading_list( &self, - _user_id: uuid::Uuid, - _status: Option, + user_id: uuid::Uuid, + status: Option, ) -> Result> { - // TODO: Implement reading list with explicit status tracking - // For now, return empty list as this requires additional schema - Ok(Vec::new()) + let conn = self.conn.clone(); + let user_id_str = user_id.to_string(); + + let fut = tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + + // Query books with reading progress for this user + // Join with book_metadata to get page counts and media_items for the items + let mut stmt = conn.prepare( + "SELECT m.*, wh.progress_secs, bm.page_count + FROM media_items m + INNER JOIN watch_history wh ON m.id = wh.media_id + LEFT JOIN book_metadata bm ON m.id = bm.media_id + WHERE wh.user_id = ?1 + ORDER BY wh.last_watched_at DESC", + )?; + + let rows = stmt.query_map([&user_id_str], |row| { + // Parse the media item + let item = 
row_to_media_item(row)?; + // Get progress info (after all MediaItem columns) + let col_offset = 27; // MediaItem has ~27 columns + let current_page = row.get::<_, f64>(col_offset)? as i32; + let total_pages = row.get::<_, Option>(col_offset + 1)?; + Ok((item, current_page, total_pages)) + })?; + + let mut results = Vec::new(); + for row in rows { + match row { + Ok((item, current_page, total_pages)) => { + // Calculate status based on progress + let calculated_status = if let Some(total) = total_pages { + if total > 0 { + let percent = + (current_page as f64 / total as f64 * 100.0).min(100.0); + if percent >= 100.0 { + crate::model::ReadingStatus::Completed + } else if percent > 0.0 { + crate::model::ReadingStatus::Reading + } else { + crate::model::ReadingStatus::ToRead + } + } else { + crate::model::ReadingStatus::Reading + } + } else { + // No total pages known, assume reading + crate::model::ReadingStatus::Reading + }; + + // Filter by status if specified + match status { + None => results.push(item), + Some(s) if s == calculated_status => results.push(item), + _ => {} + } + } + Err(_) => continue, + } + } + Ok::<_, rusqlite::Error>(results) + }); + + Ok( + tokio::time::timeout(std::time::Duration::from_secs(10), fut) + .await + .map_err(|_| PinakesError::Database("get_reading_list timed out".into()))? 
+ .map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??, + ) } #[allow(clippy::too_many_arguments)] @@ -4440,6 +4526,1627 @@ impl StorageBackend for SqliteBackend { .map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??, ) } + + // ===== Managed Storage ===== + + async fn insert_managed_media(&self, item: &MediaItem) -> Result<()> { + let conn = self.conn.clone(); + let item = item.clone(); + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO media_items (id, path, file_name, media_type, content_hash, file_size, + title, artist, album, genre, year, duration_secs, description, thumbnail_path, + storage_mode, original_filename, uploaded_at, storage_key, created_at, updated_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20)", + params![ + item.id.0.to_string(), + item.path.to_string_lossy().to_string(), + item.file_name, + media_type_to_str(&item.media_type), + item.content_hash.0, + item.file_size as i64, + item.title, + item.artist, + item.album, + item.genre, + item.year, + item.duration_secs, + item.description, + item.thumbnail_path.as_ref().map(|p| p.to_string_lossy().to_string()), + item.storage_mode.to_string(), + item.original_filename, + item.uploaded_at.map(|dt| dt.to_rfc3339()), + item.storage_key, + item.created_at.to_rfc3339(), + item.updated_at.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn get_or_create_blob( + &self, + hash: &ContentHash, + size: u64, + mime_type: &str, + ) -> Result { + let conn = self.conn.clone(); + let hash_str = hash.0.clone(); + let mime = mime_type.to_string(); + let now = chrono::Utc::now().to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + + // Try to get existing blob + let existing = conn.query_row( + "SELECT content_hash, 
file_size, mime_type, reference_count, stored_at, last_verified + FROM managed_blobs WHERE content_hash = ?1", + params![&hash_str], + |row| { + Ok(ManagedBlob { + content_hash: ContentHash(row.get::<_, String>(0)?), + file_size: row.get::<_, i64>(1)? as u64, + mime_type: row.get(2)?, + reference_count: row.get::<_, i32>(3)? as u32, + stored_at: parse_datetime(&row.get::<_, String>(4)?), + last_verified: row.get::<_, Option>(5)?.map(|s| parse_datetime(&s)), + }) + }, + ).optional()?; + + if let Some(blob) = existing { + return Ok(blob); + } + + // Create new blob + conn.execute( + "INSERT INTO managed_blobs (content_hash, file_size, mime_type, reference_count, stored_at) + VALUES (?1, ?2, ?3, 1, ?4)", + params![&hash_str, size as i64, &mime, &now], + )?; + + Ok(ManagedBlob { + content_hash: ContentHash(hash_str), + file_size: size, + mime_type: mime, + reference_count: 1, + stored_at: chrono::Utc::now(), + last_verified: None, + }) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + } + + async fn get_blob(&self, hash: &ContentHash) -> Result> { + let conn = self.conn.clone(); + let hash_str = hash.0.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified + FROM managed_blobs WHERE content_hash = ?1", + params![&hash_str], + |row| { + Ok(ManagedBlob { + content_hash: ContentHash(row.get::<_, String>(0)?), + file_size: row.get::<_, i64>(1)? as u64, + mime_type: row.get(2)?, + reference_count: row.get::<_, i32>(3)? as u32, + stored_at: parse_datetime(&row.get::<_, String>(4)?), + last_verified: row.get::<_, Option>(5)?.map(|s| parse_datetime(&s)), + }) + }, + ).optional() + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn increment_blob_ref(&self, hash: &ContentHash) -> Result<()> { + let conn = self.conn.clone(); + let hash_str = hash.0.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE managed_blobs SET reference_count = reference_count + 1 WHERE content_hash = ?1", + params![&hash_str], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result { + let conn = self.conn.clone(); + let hash_str = hash.0.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE managed_blobs SET reference_count = reference_count - 1 WHERE content_hash = ?1", + params![&hash_str], + )?; + + // Check if reference count is now 0 + let count: i32 = conn.query_row( + "SELECT reference_count FROM managed_blobs WHERE content_hash = ?1", + params![&hash_str], + |row| row.get(0), + ).unwrap_or(0); + + Ok::<_, rusqlite::Error>(count <= 0) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()> { + let conn = self.conn.clone(); + let hash_str = hash.0.clone(); + let now = chrono::Utc::now().to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE managed_blobs SET last_verified = ?1 WHERE content_hash = ?2", + params![&now, &hash_str], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn list_orphaned_blobs(&self) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified + FROM managed_blobs WHERE reference_count <= 0", + )?; + let blobs = stmt.query_map([], |row| { + Ok(ManagedBlob { + content_hash: ContentHash(row.get::<_, String>(0)?), + file_size: row.get::<_, i64>(1)? as u64, + mime_type: row.get(2)?, + reference_count: row.get::<_, i32>(3)? as u32, + stored_at: parse_datetime(&row.get::<_, String>(4)?), + last_verified: row.get::<_, Option>(5)?.map(|s| parse_datetime(&s)), + }) + })?.collect::>>()?; + Ok::<_, rusqlite::Error>(blobs) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn delete_blob(&self, hash: &ContentHash) -> Result<()> { + let conn = self.conn.clone(); + let hash_str = hash.0.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "DELETE FROM managed_blobs WHERE content_hash = ?1", + params![&hash_str], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn managed_storage_stats(&self) -> Result { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + + let total_blobs: u64 = + conn.query_row("SELECT COUNT(*) FROM managed_blobs", [], |row| { + row.get::<_, i64>(0) + })? as u64; + + let total_size: u64 = conn.query_row( + "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs", + [], + |row| row.get::<_, i64>(0), + )? as u64; + + let unique_size: u64 = conn.query_row( + "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE reference_count = 1", + [], + |row| row.get::<_, i64>(0), + )? as u64; + + let managed_media_count: u64 = conn.query_row( + "SELECT COUNT(*) FROM media_items WHERE storage_mode = 'managed'", + [], + |row| row.get::<_, i64>(0), + )? as u64; + + let orphaned_blobs: u64 = conn.query_row( + "SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0", + [], + |row| row.get::<_, i64>(0), + )? as u64; + + let dedup_ratio = if total_size > 0 { + unique_size as f64 / total_size as f64 + } else { + 1.0 + }; + + Ok::<_, rusqlite::Error>(ManagedStorageStats { + total_blobs, + total_size_bytes: total_size, + unique_size_bytes: unique_size, + deduplication_ratio: dedup_ratio, + managed_media_count, + orphaned_blobs, + }) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + // ===== Sync Devices ===== + + async fn register_device( + &self, + device: &crate::sync::SyncDevice, + token_hash: &str, + ) -> Result { + let conn = self.conn.clone(); + let device = device.clone(); + let token_hash = token_hash.to_string(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO sync_devices (id, user_id, name, device_type, client_version, os_info, + device_token_hash, last_seen_at, sync_cursor, enabled, created_at, updated_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)", + params![ + device.id.0.to_string(), + device.user_id.0.to_string(), + device.name, + device.device_type.to_string(), + device.client_version, + device.os_info, + token_hash, + device.last_seen_at.to_rfc3339(), + device.sync_cursor, + device.enabled, + device.created_at.to_rfc3339(), + device.updated_at.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(device) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn get_device(&self, id: crate::sync::DeviceId) -> Result { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT id, user_id, name, device_type, client_version, os_info, + last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at + FROM sync_devices WHERE id = ?1", + params![id.0.to_string()], + |row| { + Ok(crate::sync::SyncDevice { + id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(0)?)?), + user_id: crate::users::UserId(parse_uuid(&row.get::<_, String>(1)?)?), + name: row.get(2)?, + device_type: row.get::<_, String>(3)?.parse().unwrap_or_default(), + client_version: row.get(4)?, + os_info: row.get(5)?, + last_sync_at: row.get::<_, Option>(6)?.map(|s| parse_datetime(&s)), + last_seen_at: parse_datetime(&row.get::<_, String>(7)?), + sync_cursor: row.get(8)?, + enabled: row.get(9)?, + created_at: parse_datetime(&row.get::<_, String>(10)?), + updated_at: parse_datetime(&row.get::<_, String>(11)?), + }) + }, + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn get_device_by_token( + &self, + token_hash: &str, + ) -> Result> { + let conn = self.conn.clone(); + let token_hash = token_hash.to_string(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT id, user_id, name, device_type, client_version, os_info, + last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at + FROM sync_devices WHERE device_token_hash = ?1", + params![&token_hash], + |row| { + Ok(crate::sync::SyncDevice { + id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(0)?)?), + user_id: crate::users::UserId(parse_uuid(&row.get::<_, String>(1)?)?), + name: row.get(2)?, + device_type: row.get::<_, String>(3)?.parse().unwrap_or_default(), + client_version: row.get(4)?, + os_info: row.get(5)?, + last_sync_at: row.get::<_, Option>(6)?.map(|s| parse_datetime(&s)), + last_seen_at: parse_datetime(&row.get::<_, String>(7)?), + sync_cursor: row.get(8)?, + enabled: row.get(9)?, + created_at: parse_datetime(&row.get::<_, String>(10)?), + updated_at: parse_datetime(&row.get::<_, String>(11)?), + }) + }, + ) + .optional() + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn list_user_devices( + &self, + user_id: crate::users::UserId, + ) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, user_id, name, device_type, client_version, os_info, + last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at + FROM sync_devices WHERE user_id = ?1 ORDER BY last_seen_at DESC", + )?; + let devices = stmt + .query_map(params![user_id.0.to_string()], |row| { + Ok(crate::sync::SyncDevice { + id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(0)?)?), + user_id: crate::users::UserId(parse_uuid(&row.get::<_, String>(1)?)?), + name: row.get(2)?, + device_type: row.get::<_, String>(3)?.parse().unwrap_or_default(), + client_version: row.get(4)?, + os_info: row.get(5)?, + last_sync_at: row.get::<_, Option>(6)?.map(|s| parse_datetime(&s)), + last_seen_at: parse_datetime(&row.get::<_, String>(7)?), + sync_cursor: row.get(8)?, + enabled: row.get(9)?, + created_at: parse_datetime(&row.get::<_, String>(10)?), + updated_at: parse_datetime(&row.get::<_, String>(11)?), + }) + })? + .collect::>>()?; + Ok::<_, rusqlite::Error>(devices) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn update_device(&self, device: &crate::sync::SyncDevice) -> Result<()> { + let conn = self.conn.clone(); + let device = device.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE sync_devices SET name = ?1, device_type = ?2, client_version = ?3, + os_info = ?4, last_sync_at = ?5, last_seen_at = ?6, sync_cursor = ?7, + enabled = ?8, updated_at = ?9 WHERE id = ?10", + params![ + device.name, + device.device_type.to_string(), + device.client_version, + device.os_info, + device.last_sync_at.map(|dt| dt.to_rfc3339()), + device.last_seen_at.to_rfc3339(), + device.sync_cursor, + device.enabled, + device.updated_at.to_rfc3339(), + device.id.0.to_string(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "DELETE FROM sync_devices WHERE id = ?1", + params![id.0.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()> { + let conn = self.conn.clone(); + let now = chrono::Utc::now().to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE sync_devices SET last_seen_at = ?1, updated_at = ?1 WHERE id = ?2", + params![&now, id.0.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + // ===== Sync Log ===== + + async fn record_sync_change(&self, change: &crate::sync::SyncLogEntry) -> Result<()> { + let conn = self.conn.clone(); + let change = change.clone(); + + 
tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + + // Get and increment sequence + let seq: i64 = conn.query_row( + "UPDATE sync_sequence SET current_value = current_value + 1 WHERE id = 1 RETURNING current_value", + [], + |row| row.get(0), + )?; + + conn.execute( + "INSERT INTO sync_log (id, sequence, change_type, media_id, path, content_hash, + file_size, metadata_json, changed_by_device, timestamp) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)", + params![ + change.id.to_string(), + seq, + change.change_type.to_string(), + change.media_id.map(|m| m.0.to_string()), + change.path, + change.content_hash.as_ref().map(|h| h.0.clone()), + change.file_size.map(|s| s as i64), + change.metadata_json, + change.changed_by_device.map(|d| d.0.to_string()), + change.timestamp.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn get_changes_since( + &self, + cursor: i64, + limit: u64, + ) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, sequence, change_type, media_id, path, content_hash, + file_size, metadata_json, changed_by_device, timestamp + FROM sync_log WHERE sequence > ?1 ORDER BY sequence LIMIT ?2", + )?; + let entries = stmt + .query_map(params![cursor, limit as i64], |row| { + Ok(crate::sync::SyncLogEntry { + id: parse_uuid(&row.get::<_, String>(0)?)?, + sequence: row.get(1)?, + change_type: row + .get::<_, String>(2)? + .parse() + .unwrap_or(crate::sync::SyncChangeType::Modified), + media_id: row + .get::<_, Option>(3)? + .and_then(|s| Uuid::parse_str(&s).ok().map(MediaId)), + path: row.get(4)?, + content_hash: row.get::<_, Option>(5)?.map(ContentHash), + file_size: row.get::<_, Option>(6)?.map(|s| s as u64), + metadata_json: row.get(7)?, + changed_by_device: row + .get::<_, Option>(8)? 
+ .and_then(|s| Uuid::parse_str(&s).ok().map(crate::sync::DeviceId)), + timestamp: parse_datetime(&row.get::<_, String>(9)?), + }) + })? + .collect::>>()?; + Ok::<_, rusqlite::Error>(entries) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn get_current_sync_cursor(&self) -> Result { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT current_value FROM sync_sequence WHERE id = 1", + [], + |row| row.get(0), + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn cleanup_old_sync_log(&self, before: DateTime) -> Result { + let conn = self.conn.clone(); + let before_str = before.to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "DELETE FROM sync_log WHERE timestamp < ?1", + params![&before_str], + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map(|n| n as u64) + .map_err(|e| PinakesError::Database(e.to_string())) + } + + // ===== Device Sync State ===== + + async fn get_device_sync_state( + &self, + device_id: crate::sync::DeviceId, + path: &str, + ) -> Result> { + let conn = self.conn.clone(); + let path = path.to_string(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT device_id, path, local_hash, server_hash, local_mtime, server_mtime, + sync_status, last_synced_at, conflict_info_json + FROM device_sync_state WHERE device_id = ?1 AND path = ?2", + params![device_id.0.to_string(), &path], + |row| { + Ok(crate::sync::DeviceSyncState { + device_id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(0)?)?), + path: row.get(1)?, + local_hash: row.get(2)?, + server_hash: row.get(3)?, + local_mtime: row.get(4)?, + server_mtime: row.get(5)?, + sync_status: row + .get::<_, String>(6)? + .parse() + .unwrap_or(crate::sync::FileSyncStatus::Synced), + last_synced_at: row + .get::<_, Option>(7)? + .map(|s| parse_datetime(&s)), + conflict_info_json: row.get(8)?, + }) + }, + ) + .optional() + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn upsert_device_sync_state(&self, state: &crate::sync::DeviceSyncState) -> Result<()> { + let conn = self.conn.clone(); + let state = state.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO device_sync_state (device_id, path, local_hash, server_hash, + local_mtime, server_mtime, sync_status, last_synced_at, conflict_info_json) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9) + ON CONFLICT(device_id, path) DO UPDATE SET + local_hash = excluded.local_hash, + server_hash = excluded.server_hash, + local_mtime = excluded.local_mtime, + server_mtime = excluded.server_mtime, + sync_status = excluded.sync_status, + last_synced_at = excluded.last_synced_at, + conflict_info_json = excluded.conflict_info_json", + params![ + state.device_id.0.to_string(), + state.path, + state.local_hash, + state.server_hash, + state.local_mtime, + state.server_mtime, + state.sync_status.to_string(), + state.last_synced_at.map(|dt| dt.to_rfc3339()), + state.conflict_info_json, + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn list_pending_sync( + &self, + device_id: crate::sync::DeviceId, + ) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT device_id, path, local_hash, server_hash, local_mtime, server_mtime, + sync_status, last_synced_at, conflict_info_json + FROM device_sync_state + WHERE device_id = ?1 AND sync_status IN ('pending_upload', 'pending_download', 'conflict')", + )?; + let states = stmt.query_map(params![device_id.0.to_string()], |row| { + Ok(crate::sync::DeviceSyncState { + device_id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(0)?)?), + path: row.get(1)?, + local_hash: row.get(2)?, + server_hash: row.get(3)?, + local_mtime: row.get(4)?, 
+ server_mtime: row.get(5)?, + sync_status: row.get::<_, String>(6)?.parse().unwrap_or(crate::sync::FileSyncStatus::Synced), + last_synced_at: row.get::<_, Option>(7)?.map(|s| parse_datetime(&s)), + conflict_info_json: row.get(8)?, + }) + })?.collect::>>()?; + Ok::<_, rusqlite::Error>(states) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + // ===== Upload Sessions ===== + + async fn create_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()> { + let conn = self.conn.clone(); + let session = session.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO upload_sessions (id, device_id, target_path, expected_hash, + expected_size, chunk_size, chunk_count, status, created_at, expires_at, last_activity) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)", + params![ + session.id.to_string(), + session.device_id.0.to_string(), + session.target_path, + session.expected_hash.0, + session.expected_size as i64, + session.chunk_size as i64, + session.chunk_count as i64, + session.status.to_string(), + session.created_at.to_rfc3339(), + session.expires_at.to_rfc3339(), + session.last_activity.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn get_upload_session(&self, id: Uuid) -> Result { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT id, device_id, target_path, expected_hash, expected_size, chunk_size, + chunk_count, status, created_at, expires_at, last_activity + FROM upload_sessions WHERE id = ?1", + params![id.to_string()], + |row| { + Ok(crate::sync::UploadSession { + id: parse_uuid(&row.get::<_, String>(0)?)?, + device_id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(1)?)?), + target_path: row.get(2)?, + 
expected_hash: ContentHash(row.get(3)?), + expected_size: row.get::<_, i64>(4)? as u64, + chunk_size: row.get::<_, i64>(5)? as u64, + chunk_count: row.get::<_, i64>(6)? as u64, + status: row + .get::<_, String>(7)? + .parse() + .unwrap_or(crate::sync::UploadStatus::Pending), + created_at: parse_datetime(&row.get::<_, String>(8)?), + expires_at: parse_datetime(&row.get::<_, String>(9)?), + last_activity: parse_datetime(&row.get::<_, String>(10)?), + }) + }, + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn update_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()> { + let conn = self.conn.clone(); + let session = session.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE upload_sessions SET status = ?1, last_activity = ?2 WHERE id = ?3", + params![ + session.status.to_string(), + session.last_activity.to_rfc3339(), + session.id.to_string(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn record_chunk(&self, upload_id: Uuid, chunk: &crate::sync::ChunkInfo) -> Result<()> { + let conn = self.conn.clone(); + let chunk = chunk.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO upload_chunks (upload_id, chunk_index, offset, size, hash, received_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6) + ON CONFLICT(upload_id, chunk_index) DO UPDATE SET + offset = excluded.offset, size = excluded.size, + hash = excluded.hash, received_at = excluded.received_at", + params![ + upload_id.to_string(), + chunk.chunk_index as i64, + chunk.offset as i64, + chunk.size as i64, + chunk.hash, + chunk.received_at.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn 
get_upload_chunks(&self, upload_id: Uuid) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT upload_id, chunk_index, offset, size, hash, received_at + FROM upload_chunks WHERE upload_id = ?1 ORDER BY chunk_index", + )?; + let chunks = stmt + .query_map(params![upload_id.to_string()], |row| { + Ok(crate::sync::ChunkInfo { + upload_id: parse_uuid(&row.get::<_, String>(0)?)?, + chunk_index: row.get::<_, i64>(1)? as u64, + offset: row.get::<_, i64>(2)? as u64, + size: row.get::<_, i64>(3)? as u64, + hash: row.get(4)?, + received_at: parse_datetime(&row.get::<_, String>(5)?), + }) + })? + .collect::>>()?; + Ok::<_, rusqlite::Error>(chunks) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn cleanup_expired_uploads(&self) -> Result { + let conn = self.conn.clone(); + let now = chrono::Utc::now().to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "DELETE FROM upload_sessions WHERE expires_at < ?1", + params![&now], + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map(|n| n as u64) + .map_err(|e| PinakesError::Database(e.to_string())) + } + + // ===== Sync Conflicts ===== + + async fn record_conflict(&self, conflict: &crate::sync::SyncConflict) -> Result<()> { + let conn = self.conn.clone(); + let conflict = conflict.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO sync_conflicts (id, device_id, path, local_hash, local_mtime, + server_hash, server_mtime, detected_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", + params![ + conflict.id.to_string(), + conflict.device_id.0.to_string(), + conflict.path, + conflict.local_hash, + conflict.local_mtime, + conflict.server_hash, + conflict.server_mtime, + conflict.detected_at.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn get_unresolved_conflicts( + &self, + device_id: crate::sync::DeviceId, + ) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, device_id, path, local_hash, local_mtime, server_hash, server_mtime, + detected_at, resolved_at, resolution + FROM sync_conflicts WHERE device_id = ?1 AND resolved_at IS NULL", + )?; + let conflicts = stmt + .query_map(params![device_id.0.to_string()], |row| { + Ok(crate::sync::SyncConflict { + id: parse_uuid(&row.get::<_, String>(0)?)?, + device_id: crate::sync::DeviceId(parse_uuid(&row.get::<_, String>(1)?)?), + path: row.get(2)?, + local_hash: row.get(3)?, + local_mtime: row.get(4)?, + server_hash: row.get(5)?, + server_mtime: row.get(6)?, + detected_at: parse_datetime(&row.get::<_, String>(7)?), + resolved_at: row.get::<_, Option>(8)?.map(|s| parse_datetime(&s)), + resolution: row.get::<_, Option>(9)?.and_then(|s| { + match s.as_str() { + "server_wins" => { + Some(crate::config::ConflictResolution::ServerWins) + } + "client_wins" => { + 
Some(crate::config::ConflictResolution::ClientWins) + } + "keep_both" => Some(crate::config::ConflictResolution::KeepBoth), + "manual" => Some(crate::config::ConflictResolution::Manual), + _ => None, + } + }), + }) + })? + .collect::>>()?; + Ok::<_, rusqlite::Error>(conflicts) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn resolve_conflict( + &self, + id: Uuid, + resolution: crate::config::ConflictResolution, + ) -> Result<()> { + let conn = self.conn.clone(); + let now = chrono::Utc::now().to_rfc3339(); + let resolution_str = match resolution { + crate::config::ConflictResolution::ServerWins => "server_wins", + crate::config::ConflictResolution::ClientWins => "client_wins", + crate::config::ConflictResolution::KeepBoth => "keep_both", + crate::config::ConflictResolution::Manual => "manual", + }; + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE sync_conflicts SET resolved_at = ?1, resolution = ?2 WHERE id = ?3", + params![&now, resolution_str, id.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + // ===== Enhanced Sharing ===== + + async fn create_share(&self, share: &crate::sharing::Share) -> Result { + let conn = self.conn.clone(); + let share = share.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + + let (recipient_type, recipient_user_id, public_token, password_hash) = match &share.recipient { + crate::sharing::ShareRecipient::PublicLink { token, password_hash } => { + ("public_link", None, Some(token.clone()), password_hash.clone()) + } + crate::sharing::ShareRecipient::User { user_id } => { + ("user", Some(user_id.0.to_string()), None, None) + } + crate::sharing::ShareRecipient::Group { .. } => ("group", None, None, None), + crate::sharing::ShareRecipient::Federated { .. 
} => ("federated", None, None, None), + }; + + conn.execute( + "INSERT INTO shares (id, target_type, target_id, owner_id, recipient_type, + recipient_user_id, public_token, public_password_hash, + perm_view, perm_download, perm_edit, perm_delete, perm_reshare, perm_add, + note, expires_at, access_count, inherit_to_children, parent_share_id, + created_at, updated_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21)", + params![ + share.id.0.to_string(), + share.target.target_type(), + share.target.target_id().to_string(), + share.owner_id.0.to_string(), + recipient_type, + recipient_user_id, + public_token, + password_hash, + share.permissions.can_view, + share.permissions.can_download, + share.permissions.can_edit, + share.permissions.can_delete, + share.permissions.can_reshare, + share.permissions.can_add, + share.note, + share.expires_at.map(|dt| dt.to_rfc3339()), + share.access_count as i64, + share.inherit_to_children, + share.parent_share_id.map(|s| s.0.to_string()), + share.created_at.to_rfc3339(), + share.updated_at.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(share) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn get_share(&self, id: crate::sharing::ShareId) -> Result { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE id = ?1", + params![id.0.to_string()], + |row| row_to_share(row), + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn get_share_by_token(&self, token: &str) -> Result { + let conn = self.conn.clone(); + let token = token.to_string(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.query_row( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE public_token = ?1", + params![&token], + |row| row_to_share(row), + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn list_shares_by_owner( + &self, + owner_id: crate::users::UserId, + pagination: &Pagination, + ) -> Result> { + let conn = self.conn.clone(); + let offset = pagination.offset; + let limit = pagination.limit; + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE owner_id = ?1 ORDER BY created_at DESC LIMIT ?2 OFFSET ?3", + )?; + let shares = stmt + .query_map( + params![owner_id.0.to_string(), limit as i64, offset as i64], + |row| row_to_share(row), + )? + .collect::>>()?; + Ok::<_, rusqlite::Error>(shares) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn list_shares_for_user( + &self, + user_id: crate::users::UserId, + pagination: &Pagination, + ) -> Result> { + let conn = self.conn.clone(); + let offset = pagination.offset; + let limit = pagination.limit; + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE recipient_user_id = ?1 ORDER BY created_at DESC LIMIT ?2 OFFSET ?3", + )?; + let shares = stmt.query_map( + params![user_id.0.to_string(), limit as i64, offset as i64], + |row| row_to_share(row), + )?.collect::>>()?; + Ok::<_, rusqlite::Error>(shares) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn list_shares_for_target( + &self, + target: &crate::sharing::ShareTarget, + ) -> Result> { + let conn = self.conn.clone(); + let target_type = target.target_type().to_string(); + let target_id = target.target_id().to_string(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id, + public_token, public_password_hash, perm_view, perm_download, perm_edit, + perm_delete, perm_reshare, perm_add, note, expires_at, access_count, + last_accessed, inherit_to_children, parent_share_id, created_at, updated_at + FROM shares WHERE target_type = ?1 AND target_id = ?2", + )?; + let shares = stmt + .query_map(params![&target_type, &target_id], |row| row_to_share(row))? 
+ .collect::>>()?; + Ok::<_, rusqlite::Error>(shares) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn update_share(&self, share: &crate::sharing::Share) -> Result { + let conn = self.conn.clone(); + let share = share.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE shares SET + perm_view = ?1, perm_download = ?2, perm_edit = ?3, perm_delete = ?4, + perm_reshare = ?5, perm_add = ?6, note = ?7, expires_at = ?8, + inherit_to_children = ?9, updated_at = ?10 + WHERE id = ?11", + params![ + share.permissions.can_view, + share.permissions.can_download, + share.permissions.can_edit, + share.permissions.can_delete, + share.permissions.can_reshare, + share.permissions.can_add, + share.note, + share.expires_at.map(|dt| dt.to_rfc3339()), + share.inherit_to_children, + share.updated_at.to_rfc3339(), + share.id.0.to_string(), + ], + )?; + Ok::<_, rusqlite::Error>(share) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn delete_share(&self, id: crate::sharing::ShareId) -> Result<()> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "DELETE FROM shares WHERE id = ?1", + params![id.0.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn record_share_access(&self, id: crate::sharing::ShareId) -> Result<()> { + let conn = self.conn.clone(); + let now = chrono::Utc::now().to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE shares SET access_count = access_count + 1, last_accessed = ?1 WHERE id = ?2", + params![&now, id.0.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn check_share_access( + &self, + user_id: Option, + target: &crate::sharing::ShareTarget, + ) -> Result> { + let shares = self.list_shares_for_target(target).await?; + let now = chrono::Utc::now(); + + for share in shares { + // Skip expired shares + if let Some(exp) = share.expires_at { + if exp < now { + continue; + } + } + + match (&share.recipient, user_id) { + // Public links are accessible to anyone + (crate::sharing::ShareRecipient::PublicLink { .. }, _) => { + return Ok(Some(share.permissions)); + } + // User shares require matching user + ( + crate::sharing::ShareRecipient::User { + user_id: share_user, + }, + Some(uid), + ) if *share_user == uid => { + return Ok(Some(share.permissions)); + } + _ => continue, + } + } + + Ok(None) + } + + async fn get_effective_share_permissions( + &self, + user_id: Option, + media_id: MediaId, + ) -> Result> { + // Check direct media shares + let target = crate::sharing::ShareTarget::Media { media_id }; + if let Some(perms) = self.check_share_access(user_id, &target).await? 
{ + return Ok(Some(perms)); + } + + // Check collection shares (inheritance) + let conn = self.conn.clone(); + let media_id_str = media_id.0.to_string(); + + let collection_ids: Vec = tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = + conn.prepare("SELECT collection_id FROM collection_items WHERE media_id = ?1")?; + let ids = stmt + .query_map([&media_id_str], |row| { + let id_str: String = row.get(0)?; + Ok(Uuid::parse_str(&id_str).ok()) + })? + .filter_map(|r| r.ok().flatten()) + .collect::>(); + Ok::<_, rusqlite::Error>(ids) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string()))?; + + for collection_id in collection_ids { + let target = crate::sharing::ShareTarget::Collection { collection_id }; + if let Some(perms) = self.check_share_access(user_id, &target).await? { + return Ok(Some(perms)); + } + } + + // Check tag shares (inheritance) + let conn = self.conn.clone(); + let media_id_str = media_id.0.to_string(); + + let tag_ids: Vec = tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare("SELECT tag_id FROM media_tags WHERE media_id = ?1")?; + let ids = stmt + .query_map([&media_id_str], |row| { + let id_str: String = row.get(0)?; + Ok(Uuid::parse_str(&id_str).ok()) + })? + .filter_map(|r| r.ok().flatten()) + .collect::>(); + Ok::<_, rusqlite::Error>(ids) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string()))?; + + for tag_id in tag_ids { + let target = crate::sharing::ShareTarget::Tag { tag_id }; + if let Some(perms) = self.check_share_access(user_id, &target).await? 
{ + return Ok(Some(perms)); + } + } + + Ok(None) + } + + async fn batch_delete_shares(&self, ids: &[crate::sharing::ShareId]) -> Result { + let conn = self.conn.clone(); + let id_strings: Vec = ids.iter().map(|id| id.0.to_string()).collect(); + + if id_strings.is_empty() { + return Ok(0); + } + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let placeholders: Vec = + (1..=id_strings.len()).map(|i| format!("?{}", i)).collect(); + let sql = format!( + "DELETE FROM shares WHERE id IN ({})", + placeholders.join(", ") + ); + let params: Vec<&dyn rusqlite::types::ToSql> = id_strings + .iter() + .map(|s| s as &dyn rusqlite::types::ToSql) + .collect(); + conn.execute(&sql, &*params) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? + .map(|n| n as u64) + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn cleanup_expired_shares(&self) -> Result { + let conn = self.conn.clone(); + let now = chrono::Utc::now().to_rfc3339(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "DELETE FROM shares WHERE expires_at IS NOT NULL AND expires_at < ?1", + params![&now], + ) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map(|n| n as u64) + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn record_share_activity(&self, activity: &crate::sharing::ShareActivity) -> Result<()> { + let conn = self.conn.clone(); + let activity = activity.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO share_activity (id, share_id, actor_id, actor_ip, action, details, timestamp) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + params![ + activity.id.to_string(), + activity.share_id.0.to_string(), + activity.actor_id.map(|u| u.0.to_string()), + activity.actor_ip, + activity.action.to_string(), + activity.details, + activity.timestamp.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn get_share_activity( + &self, + share_id: crate::sharing::ShareId, + pagination: &Pagination, + ) -> Result> { + let conn = self.conn.clone(); + let offset = pagination.offset; + let limit = pagination.limit; + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, share_id, actor_id, actor_ip, action, details, timestamp + FROM share_activity WHERE share_id = ?1 ORDER BY timestamp DESC LIMIT ?2 OFFSET ?3", + )?; + let activities = stmt.query_map( + params![share_id.0.to_string(), limit as i64, offset as i64], + |row| { + Ok(crate::sharing::ShareActivity { + id: parse_uuid(&row.get::<_, String>(0)?)?, + share_id: crate::sharing::ShareId(parse_uuid(&row.get::<_, String>(1)?)?), + actor_id: row.get::<_, Option>(2)?.and_then(|s| Uuid::parse_str(&s).ok().map(crate::users::UserId)), + actor_ip: row.get(3)?, + action: row.get::<_, String>(4)?.parse().unwrap_or(crate::sharing::ShareActivityAction::Accessed), + details: row.get(5)?, + timestamp: parse_datetime(&row.get::<_, String>(6)?), + }) + }, + )?.collect::>>()?; + Ok::<_, rusqlite::Error>(activities) + }) + .await + .map_err(|e| 
PinakesError::Database(e.to_string()))? + .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn create_share_notification( + &self, + notification: &crate::sharing::ShareNotification, + ) -> Result<()> { + let conn = self.conn.clone(); + let notification = notification.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "INSERT INTO share_notifications (id, user_id, share_id, notification_type, is_read, created_at) + VALUES (?1, ?2, ?3, ?4, ?5, ?6)", + params![ + notification.id.to_string(), + notification.user_id.0.to_string(), + notification.share_id.0.to_string(), + notification.notification_type.to_string(), + notification.is_read, + notification.created_at.to_rfc3339(), + ], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn get_unread_notifications( + &self, + user_id: crate::users::UserId, + ) -> Result> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + let mut stmt = conn.prepare( + "SELECT id, user_id, share_id, notification_type, is_read, created_at + FROM share_notifications WHERE user_id = ?1 AND is_read = 0 ORDER BY created_at DESC", + )?; + let notifications = stmt.query_map(params![user_id.0.to_string()], |row| { + Ok(crate::sharing::ShareNotification { + id: parse_uuid(&row.get::<_, String>(0)?)?, + user_id: crate::users::UserId(parse_uuid(&row.get::<_, String>(1)?)?), + share_id: crate::sharing::ShareId(parse_uuid(&row.get::<_, String>(2)?)?), + notification_type: row.get::<_, String>(3)?.parse().unwrap_or(crate::sharing::ShareNotificationType::NewShare), + is_read: row.get(4)?, + created_at: parse_datetime(&row.get::<_, String>(5)?), + }) + })?.collect::>>()?; + Ok::<_, rusqlite::Error>(notifications) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))? 
+ .map_err(|e| PinakesError::Database(e.to_string())) + } + + async fn mark_notification_read(&self, id: Uuid) -> Result<()> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE share_notifications SET is_read = 1 WHERE id = ?1", + params![id.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } + + async fn mark_all_notifications_read(&self, user_id: crate::users::UserId) -> Result<()> { + let conn = self.conn.clone(); + + tokio::task::spawn_blocking(move || { + let conn = conn.lock().unwrap(); + conn.execute( + "UPDATE share_notifications SET is_read = 1 WHERE user_id = ?1", + params![user_id.0.to_string()], + )?; + Ok::<_, rusqlite::Error>(()) + }) + .await + .map_err(|e| PinakesError::Database(e.to_string()))??; + Ok(()) + } +} + +// Helper function to parse a share row +fn row_to_share(row: &Row) -> rusqlite::Result { + let id_str: String = row.get(0)?; + let target_type: String = row.get(1)?; + let target_id_str: String = row.get(2)?; + let owner_id_str: String = row.get(3)?; + let recipient_type: String = row.get(4)?; + let recipient_user_id: Option = row.get(5)?; + let public_token: Option = row.get(6)?; + let password_hash: Option = row.get(7)?; + + let target = match target_type.as_str() { + "media" => crate::sharing::ShareTarget::Media { + media_id: MediaId(parse_uuid(&target_id_str)?), + }, + "collection" => crate::sharing::ShareTarget::Collection { + collection_id: parse_uuid(&target_id_str)?, + }, + "tag" => crate::sharing::ShareTarget::Tag { + tag_id: parse_uuid(&target_id_str)?, + }, + "saved_search" => crate::sharing::ShareTarget::SavedSearch { + search_id: parse_uuid(&target_id_str)?, + }, + _ => crate::sharing::ShareTarget::Media { + media_id: MediaId(parse_uuid(&target_id_str)?), + }, + }; + + let recipient = match recipient_type.as_str() { + "public_link" => 
crate::sharing::ShareRecipient::PublicLink { + token: public_token.unwrap_or_default(), + password_hash, + }, + "user" => crate::sharing::ShareRecipient::User { + user_id: crate::users::UserId(parse_uuid(&recipient_user_id.unwrap_or_default())?), + }, + "group" => crate::sharing::ShareRecipient::Group { + group_id: Uuid::nil(), + }, + _ => crate::sharing::ShareRecipient::PublicLink { + token: public_token.unwrap_or_default(), + password_hash, + }, + }; + + Ok(crate::sharing::Share { + id: crate::sharing::ShareId(parse_uuid(&id_str)?), + target, + owner_id: crate::users::UserId(parse_uuid(&owner_id_str)?), + recipient, + permissions: crate::sharing::SharePermissions { + can_view: row.get(8)?, + can_download: row.get(9)?, + can_edit: row.get(10)?, + can_delete: row.get(11)?, + can_reshare: row.get(12)?, + can_add: row.get(13)?, + }, + note: row.get(14)?, + expires_at: row + .get::<_, Option>(15)? + .map(|s| parse_datetime(&s)), + access_count: row.get::<_, i64>(16)? as u64, + last_accessed: row + .get::<_, Option>(17)? + .map(|s| parse_datetime(&s)), + inherit_to_children: row.get(18)?, + parent_share_id: row + .get::<_, Option>(19)? + .and_then(|s| Uuid::parse_str(&s).ok().map(crate::sharing::ShareId)), + created_at: parse_datetime(&row.get::<_, String>(20)?), + updated_at: parse_datetime(&row.get::<_, String>(21)?), + }) } // Needed for `query_row(...).optional()` diff --git a/crates/pinakes-core/src/sync/chunked.rs b/crates/pinakes-core/src/sync/chunked.rs new file mode 100644 index 0000000..13a5404 --- /dev/null +++ b/crates/pinakes-core/src/sync/chunked.rs @@ -0,0 +1,297 @@ +//! Chunked upload handling for large file sync. + +use std::path::{Path, PathBuf}; + +use chrono::Utc; +use tokio::fs; +use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; +use tracing::{debug, info}; +use uuid::Uuid; + +use crate::error::{PinakesError, Result}; + +use super::{ChunkInfo, UploadSession}; + +/// Manager for chunked uploads. 
+#[derive(Debug, Clone)] +pub struct ChunkedUploadManager { + temp_dir: PathBuf, +} + +impl ChunkedUploadManager { + /// Create a new chunked upload manager. + pub fn new(temp_dir: PathBuf) -> Self { + Self { temp_dir } + } + + /// Initialize the temp directory. + pub async fn init(&self) -> Result<()> { + fs::create_dir_all(&self.temp_dir).await?; + Ok(()) + } + + /// Get the temp file path for an upload session. + pub fn temp_path(&self, session_id: Uuid) -> PathBuf { + self.temp_dir.join(format!("{}.upload", session_id)) + } + + /// Create the temp file for a new upload session. + pub async fn create_temp_file(&self, session: &UploadSession) -> Result<()> { + let path = self.temp_path(session.id); + + // Create a sparse file of the expected size + let file = fs::File::create(&path).await?; + file.set_len(session.expected_size).await?; + + debug!( + session_id = %session.id, + size = session.expected_size, + "created temp file for upload" + ); + + Ok(()) + } + + /// Write a chunk to the temp file. 
+ pub async fn write_chunk( + &self, + session: &UploadSession, + chunk_index: u64, + data: &[u8], + ) -> Result { + let path = self.temp_path(session.id); + + if !path.exists() { + return Err(PinakesError::UploadSessionNotFound(session.id.to_string())); + } + + // Calculate offset + let offset = chunk_index * session.chunk_size; + + // Validate chunk + if offset >= session.expected_size { + return Err(PinakesError::ChunkOutOfOrder { + expected: session.chunk_count - 1, + actual: chunk_index, + }); + } + + // Calculate expected chunk size + let expected_size = if chunk_index == session.chunk_count - 1 { + // Last chunk may be smaller + session.expected_size - offset + } else { + session.chunk_size + }; + + if data.len() as u64 != expected_size { + return Err(PinakesError::InvalidData(format!( + "chunk {} has wrong size: expected {}, got {}", + chunk_index, + expected_size, + data.len() + ))); + } + + // Write chunk to file at offset + let mut file = fs::OpenOptions::new().write(true).open(&path).await?; + + file.seek(std::io::SeekFrom::Start(offset)).await?; + file.write_all(data).await?; + file.flush().await?; + + // Compute chunk hash + let hash = blake3::hash(data).to_hex().to_string(); + + debug!( + session_id = %session.id, + chunk_index, + offset, + size = data.len(), + "wrote chunk" + ); + + Ok(ChunkInfo { + upload_id: session.id, + chunk_index, + offset, + size: data.len() as u64, + hash, + received_at: Utc::now(), + }) + } + + /// Verify and finalize the upload. + /// + /// Checks that: + /// 1. All chunks are received + /// 2. File size matches expected + /// 3. 
Content hash matches expected + pub async fn finalize( + &self, + session: &UploadSession, + received_chunks: &[ChunkInfo], + ) -> Result { + let path = self.temp_path(session.id); + + // Check all chunks received + if received_chunks.len() as u64 != session.chunk_count { + return Err(PinakesError::InvalidData(format!( + "missing chunks: expected {}, got {}", + session.chunk_count, + received_chunks.len() + ))); + } + + // Verify chunk indices + let mut indices: Vec = received_chunks.iter().map(|c| c.chunk_index).collect(); + indices.sort(); + for (i, idx) in indices.iter().enumerate() { + if *idx != i as u64 { + return Err(PinakesError::InvalidData(format!( + "chunk {} missing or out of order", + i + ))); + } + } + + // Verify file size + let metadata = fs::metadata(&path).await?; + if metadata.len() != session.expected_size { + return Err(PinakesError::InvalidData(format!( + "file size mismatch: expected {}, got {}", + session.expected_size, + metadata.len() + ))); + } + + // Verify content hash + let computed_hash = compute_file_hash(&path).await?; + if computed_hash != session.expected_hash.0 { + return Err(PinakesError::StorageIntegrity(format!( + "hash mismatch: expected {}, computed {}", + session.expected_hash, computed_hash + ))); + } + + info!( + session_id = %session.id, + hash = %session.expected_hash, + size = session.expected_size, + "finalized chunked upload" + ); + + Ok(path) + } + + /// Cancel an upload and clean up temp file. + pub async fn cancel(&self, session_id: Uuid) -> Result<()> { + let path = self.temp_path(session_id); + if path.exists() { + fs::remove_file(&path).await?; + debug!(session_id = %session_id, "cancelled upload, removed temp file"); + } + Ok(()) + } + + /// Clean up expired temp files. 
+ pub async fn cleanup_expired(&self, max_age_hours: u64) -> Result { + let mut count = 0u64; + let max_age = std::time::Duration::from_secs(max_age_hours * 3600); + + let mut entries = fs::read_dir(&self.temp_dir).await?; + while let Some(entry) = entries.next_entry().await? { + let path = entry.path(); + if path.extension().map(|e| e == "upload").unwrap_or(false) { + if let Ok(metadata) = fs::metadata(&path).await { + if let Ok(modified) = metadata.modified() { + let age = std::time::SystemTime::now() + .duration_since(modified) + .unwrap_or_default(); + if age > max_age { + let _ = fs::remove_file(&path).await; + count += 1; + } + } + } + } + } + + if count > 0 { + info!(count, "cleaned up expired upload temp files"); + } + Ok(count) + } +} + +/// Compute the BLAKE3 hash of a file. +async fn compute_file_hash(path: &Path) -> Result { + let mut file = fs::File::open(path).await?; + let mut hasher = blake3::Hasher::new(); + let mut buf = vec![0u8; 64 * 1024]; + + loop { + let n = file.read(&mut buf).await?; + if n == 0 { + break; + } + hasher.update(&buf[..n]); + } + + Ok(hasher.finalize().to_hex().to_string()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::model::ContentHash; + use crate::sync::UploadStatus; + use tempfile::tempdir; + + #[tokio::test] + async fn test_chunked_upload() { + let dir = tempdir().unwrap(); + let manager = ChunkedUploadManager::new(dir.path().to_path_buf()); + manager.init().await.unwrap(); + + // Create test data + let data = b"Hello, World! 
This is test data for chunked upload."; + let hash = blake3::hash(data).to_hex().to_string(); + let chunk_size = 20u64; + + let session = UploadSession { + id: Uuid::now_v7(), + device_id: super::super::DeviceId::new(), + target_path: "/test/file.txt".to_string(), + expected_hash: ContentHash::new(hash.clone()), + expected_size: data.len() as u64, + chunk_size, + chunk_count: (data.len() as u64 + chunk_size - 1) / chunk_size, + status: UploadStatus::InProgress, + created_at: Utc::now(), + expires_at: Utc::now() + chrono::Duration::hours(24), + last_activity: Utc::now(), + }; + + manager.create_temp_file(&session).await.unwrap(); + + // Write chunks + let mut chunks = Vec::new(); + for i in 0..session.chunk_count { + let start = (i * chunk_size) as usize; + let end = ((i + 1) * chunk_size).min(data.len() as u64) as usize; + let chunk_data = &data[start..end]; + + let chunk = manager.write_chunk(&session, i, chunk_data).await.unwrap(); + chunks.push(chunk); + } + + // Finalize + let final_path = manager.finalize(&session, &chunks).await.unwrap(); + assert!(final_path.exists()); + + // Verify content + let content = fs::read(&final_path).await.unwrap(); + assert_eq!(&content[..], data); + } +} diff --git a/crates/pinakes-core/src/sync/conflict.rs b/crates/pinakes-core/src/sync/conflict.rs new file mode 100644 index 0000000..70af9b4 --- /dev/null +++ b/crates/pinakes-core/src/sync/conflict.rs @@ -0,0 +1,144 @@ +//! Conflict detection and resolution for sync. + +use crate::config::ConflictResolution; + +use super::DeviceSyncState; + +/// Detect if there's a conflict between local and server state. 
+pub fn detect_conflict(state: &DeviceSyncState) -> Option { + // If either side has no hash, no conflict possible + let local_hash = state.local_hash.as_ref()?; + let server_hash = state.server_hash.as_ref()?; + + // Same hash = no conflict + if local_hash == server_hash { + return None; + } + + // Both have different hashes = conflict + Some(ConflictInfo { + path: state.path.clone(), + local_hash: local_hash.clone(), + server_hash: server_hash.clone(), + local_mtime: state.local_mtime, + server_mtime: state.server_mtime, + }) +} + +/// Information about a detected conflict. +#[derive(Debug, Clone)] +pub struct ConflictInfo { + pub path: String, + pub local_hash: String, + pub server_hash: String, + pub local_mtime: Option, + pub server_mtime: Option, +} + +/// Result of resolving a conflict. +#[derive(Debug, Clone)] +pub enum ConflictOutcome { + /// Use the server version + UseServer, + /// Use the local version (upload it) + UseLocal, + /// Keep both versions (rename one) + KeepBoth { new_local_path: String }, + /// Requires manual intervention + Manual, +} + +/// Resolve a conflict based on the configured strategy. +pub fn resolve_conflict( + conflict: &ConflictInfo, + resolution: ConflictResolution, +) -> ConflictOutcome { + match resolution { + ConflictResolution::ServerWins => ConflictOutcome::UseServer, + ConflictResolution::ClientWins => ConflictOutcome::UseLocal, + ConflictResolution::KeepBoth => { + let new_path = generate_conflict_path(&conflict.path, &conflict.local_hash); + ConflictOutcome::KeepBoth { + new_local_path: new_path, + } + } + ConflictResolution::Manual => ConflictOutcome::Manual, + } +} + +/// Generate a new path for the conflicting local file. 
+/// Format: filename.conflict-.ext +fn generate_conflict_path(original_path: &str, local_hash: &str) -> String { + let short_hash = &local_hash[..8.min(local_hash.len())]; + + if let Some((base, ext)) = original_path.rsplit_once('.') { + format!("{}.conflict-{}.{}", base, short_hash, ext) + } else { + format!("{}.conflict-{}", original_path, short_hash) + } +} + +/// Automatic conflict resolution based on modification times. +/// Useful when ConflictResolution is set to a time-based strategy. +pub fn resolve_by_mtime(conflict: &ConflictInfo) -> ConflictOutcome { + match (conflict.local_mtime, conflict.server_mtime) { + (Some(local), Some(server)) => { + if local > server { + ConflictOutcome::UseLocal + } else { + ConflictOutcome::UseServer + } + } + (Some(_), None) => ConflictOutcome::UseLocal, + (None, Some(_)) => ConflictOutcome::UseServer, + (None, None) => ConflictOutcome::UseServer, // Default to server + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::sync::FileSyncStatus; + + #[test] + fn test_generate_conflict_path() { + assert_eq!( + generate_conflict_path("/path/to/file.txt", "abc12345"), + "/path/to/file.conflict-abc12345.txt" + ); + + assert_eq!( + generate_conflict_path("/path/to/file", "abc12345"), + "/path/to/file.conflict-abc12345" + ); + } + + #[test] + fn test_detect_conflict() { + let state_no_conflict = DeviceSyncState { + device_id: super::super::DeviceId::new(), + path: "/test".to_string(), + local_hash: Some("abc".to_string()), + server_hash: Some("abc".to_string()), + local_mtime: None, + server_mtime: None, + sync_status: FileSyncStatus::Synced, + last_synced_at: None, + conflict_info_json: None, + }; + assert!(detect_conflict(&state_no_conflict).is_none()); + + let state_conflict = DeviceSyncState { + device_id: super::super::DeviceId::new(), + path: "/test".to_string(), + local_hash: Some("abc".to_string()), + server_hash: Some("def".to_string()), + local_mtime: None, + server_mtime: None, + sync_status: 
FileSyncStatus::Conflict, + last_synced_at: None, + conflict_info_json: None, + }; + assert!(detect_conflict(&state_conflict).is_some()); + } +} diff --git a/crates/pinakes-core/src/sync/mod.rs b/crates/pinakes-core/src/sync/mod.rs new file mode 100644 index 0000000..77181f1 --- /dev/null +++ b/crates/pinakes-core/src/sync/mod.rs @@ -0,0 +1,14 @@ +//! Cross-device synchronization module. +//! +//! Provides device registration, change tracking, and conflict resolution +//! for syncing media libraries across multiple devices. + +mod chunked; +mod conflict; +mod models; +mod protocol; + +pub use chunked::*; +pub use conflict::*; +pub use models::*; +pub use protocol::*; diff --git a/crates/pinakes-core/src/sync/models.rs b/crates/pinakes-core/src/sync/models.rs new file mode 100644 index 0000000..7aff331 --- /dev/null +++ b/crates/pinakes-core/src/sync/models.rs @@ -0,0 +1,380 @@ +//! Sync domain models. + +use std::fmt; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::config::ConflictResolution; +use crate::model::{ContentHash, MediaId}; +use crate::users::UserId; + +/// Unique identifier for a sync device. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct DeviceId(pub Uuid); + +impl DeviceId { + pub fn new() -> Self { + Self(Uuid::now_v7()) + } +} + +impl Default for DeviceId { + fn default() -> Self { + Self::new() + } +} + +impl fmt::Display for DeviceId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Type of sync device. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum DeviceType { + Desktop, + Mobile, + Tablet, + Server, + Other, +} + +impl Default for DeviceType { + fn default() -> Self { + Self::Other + } +} + +impl fmt::Display for DeviceType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Desktop => write!(f, "desktop"), + Self::Mobile => write!(f, "mobile"), + Self::Tablet => write!(f, "tablet"), + Self::Server => write!(f, "server"), + Self::Other => write!(f, "other"), + } + } +} + +impl std::str::FromStr for DeviceType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "desktop" => Ok(Self::Desktop), + "mobile" => Ok(Self::Mobile), + "tablet" => Ok(Self::Tablet), + "server" => Ok(Self::Server), + "other" => Ok(Self::Other), + _ => Err(format!("unknown device type: {}", s)), + } + } +} + +/// A registered sync device. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SyncDevice { + pub id: DeviceId, + pub user_id: UserId, + pub name: String, + pub device_type: DeviceType, + pub client_version: String, + pub os_info: Option, + pub last_sync_at: Option>, + pub last_seen_at: DateTime, + pub sync_cursor: Option, + pub enabled: bool, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl SyncDevice { + pub fn new( + user_id: UserId, + name: String, + device_type: DeviceType, + client_version: String, + ) -> Self { + let now = Utc::now(); + Self { + id: DeviceId::new(), + user_id, + name, + device_type, + client_version, + os_info: None, + last_sync_at: None, + last_seen_at: now, + sync_cursor: None, + enabled: true, + created_at: now, + updated_at: now, + } + } +} + +/// Type of change recorded in the sync log. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum SyncChangeType { + Created, + Modified, + Deleted, + Moved, + MetadataUpdated, +} + +impl fmt::Display for SyncChangeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Created => write!(f, "created"), + Self::Modified => write!(f, "modified"), + Self::Deleted => write!(f, "deleted"), + Self::Moved => write!(f, "moved"), + Self::MetadataUpdated => write!(f, "metadata_updated"), + } + } +} + +impl std::str::FromStr for SyncChangeType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "created" => Ok(Self::Created), + "modified" => Ok(Self::Modified), + "deleted" => Ok(Self::Deleted), + "moved" => Ok(Self::Moved), + "metadata_updated" => Ok(Self::MetadataUpdated), + _ => Err(format!("unknown sync change type: {}", s)), + } + } +} + +/// An entry in the sync log tracking a change. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SyncLogEntry { + pub id: Uuid, + pub sequence: i64, + pub change_type: SyncChangeType, + pub media_id: Option, + pub path: String, + pub content_hash: Option, + pub file_size: Option, + pub metadata_json: Option, + pub changed_by_device: Option, + pub timestamp: DateTime, +} + +impl SyncLogEntry { + pub fn new( + change_type: SyncChangeType, + path: String, + media_id: Option, + content_hash: Option, + ) -> Self { + Self { + id: Uuid::now_v7(), + sequence: 0, // Will be assigned by database + change_type, + media_id, + path, + content_hash, + file_size: None, + metadata_json: None, + changed_by_device: None, + timestamp: Utc::now(), + } + } +} + +/// Sync status for a file on a device. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum FileSyncStatus { + Synced, + PendingUpload, + PendingDownload, + Conflict, + Deleted, +} + +impl fmt::Display for FileSyncStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Synced => write!(f, "synced"), + Self::PendingUpload => write!(f, "pending_upload"), + Self::PendingDownload => write!(f, "pending_download"), + Self::Conflict => write!(f, "conflict"), + Self::Deleted => write!(f, "deleted"), + } + } +} + +impl std::str::FromStr for FileSyncStatus { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "synced" => Ok(Self::Synced), + "pending_upload" => Ok(Self::PendingUpload), + "pending_download" => Ok(Self::PendingDownload), + "conflict" => Ok(Self::Conflict), + "deleted" => Ok(Self::Deleted), + _ => Err(format!("unknown file sync status: {}", s)), + } + } +} + +/// Sync state for a specific file on a specific device. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DeviceSyncState { + pub device_id: DeviceId, + pub path: String, + pub local_hash: Option, + pub server_hash: Option, + pub local_mtime: Option, + pub server_mtime: Option, + pub sync_status: FileSyncStatus, + pub last_synced_at: Option>, + pub conflict_info_json: Option, +} + +/// A sync conflict that needs resolution. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SyncConflict { + pub id: Uuid, + pub device_id: DeviceId, + pub path: String, + pub local_hash: String, + pub local_mtime: i64, + pub server_hash: String, + pub server_mtime: i64, + pub detected_at: DateTime, + pub resolved_at: Option>, + pub resolution: Option, +} + +impl SyncConflict { + pub fn new( + device_id: DeviceId, + path: String, + local_hash: String, + local_mtime: i64, + server_hash: String, + server_mtime: i64, + ) -> Self { + Self { + id: Uuid::now_v7(), + device_id, + path, + local_hash, + local_mtime, + server_hash, + server_mtime, + detected_at: Utc::now(), + resolved_at: None, + resolution: None, + } + } +} + +/// Status of an upload session. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum UploadStatus { + Pending, + InProgress, + Completed, + Failed, + Expired, + Cancelled, +} + +impl fmt::Display for UploadStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Pending => write!(f, "pending"), + Self::InProgress => write!(f, "in_progress"), + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Expired => write!(f, "expired"), + Self::Cancelled => write!(f, "cancelled"), + } + } +} + +impl std::str::FromStr for UploadStatus { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "pending" => Ok(Self::Pending), + "in_progress" => Ok(Self::InProgress), + "completed" => Ok(Self::Completed), + "failed" => Ok(Self::Failed), + "expired" => Ok(Self::Expired), + "cancelled" => Ok(Self::Cancelled), + _ => Err(format!("unknown upload status: {}", s)), + } + } +} + +/// A chunked upload session. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UploadSession { + pub id: Uuid, + pub device_id: DeviceId, + pub target_path: String, + pub expected_hash: ContentHash, + pub expected_size: u64, + pub chunk_size: u64, + pub chunk_count: u64, + pub status: UploadStatus, + pub created_at: DateTime, + pub expires_at: DateTime, + pub last_activity: DateTime, +} + +impl UploadSession { + pub fn new( + device_id: DeviceId, + target_path: String, + expected_hash: ContentHash, + expected_size: u64, + chunk_size: u64, + timeout_hours: u64, + ) -> Self { + let now = Utc::now(); + let chunk_count = (expected_size + chunk_size - 1) / chunk_size; + Self { + id: Uuid::now_v7(), + device_id, + target_path, + expected_hash, + expected_size, + chunk_size, + chunk_count, + status: UploadStatus::Pending, + created_at: now, + expires_at: now + chrono::Duration::hours(timeout_hours as i64), + last_activity: now, + } + } +} + +/// Information about an uploaded chunk. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ChunkInfo { + pub upload_id: Uuid, + pub chunk_index: u64, + pub offset: u64, + pub size: u64, + pub hash: String, + pub received_at: DateTime, +} diff --git a/crates/pinakes-core/src/sync/protocol.rs b/crates/pinakes-core/src/sync/protocol.rs new file mode 100644 index 0000000..204dae3 --- /dev/null +++ b/crates/pinakes-core/src/sync/protocol.rs @@ -0,0 +1,215 @@ +//! Sync protocol implementation. +//! +//! Handles the bidirectional sync protocol between clients and server. + +use chrono::Utc; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::error::Result; +use crate::model::{ContentHash, MediaId}; +use crate::storage::DynStorageBackend; + +use super::{DeviceId, DeviceSyncState, FileSyncStatus, SyncChangeType, SyncLogEntry}; + +/// Request from client to get changes since a cursor. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ChangesRequest { + pub cursor: i64, + pub limit: Option, +} + +/// Response containing changes since the cursor. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ChangesResponse { + pub changes: Vec, + pub cursor: i64, + pub has_more: bool, +} + +/// A change reported by the client. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClientChange { + pub path: String, + pub change_type: SyncChangeType, + pub content_hash: Option, + pub file_size: Option, + pub local_mtime: Option, + pub metadata: Option, +} + +/// Request from client to report local changes. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReportChangesRequest { + pub device_id: String, + pub changes: Vec, +} + +/// Result of processing a client change. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ChangeResult { + /// Change accepted, no action needed + Accepted { path: String }, + /// Conflict detected, needs resolution + Conflict { + path: String, + server_hash: String, + server_mtime: i64, + }, + /// Upload required for new/modified file + UploadRequired { + path: String, + upload_url: String, + session_id: String, + }, + /// Error processing change + Error { path: String, message: String }, +} + +/// Response to a report changes request. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReportChangesResponse { + pub results: Vec, + pub server_cursor: i64, +} + +/// Acknowledgment from client that changes have been processed. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AckRequest { + pub device_id: String, + pub cursor: i64, + pub processed_paths: Vec, +} + +/// Get changes since a cursor position. 
+pub async fn get_changes( + storage: &DynStorageBackend, + cursor: i64, + limit: u64, +) -> Result { + let limit = limit.min(1000); // Cap at 1000 + let changes = storage.get_changes_since(cursor, limit + 1).await?; + + let has_more = changes.len() > limit as usize; + let changes: Vec<_> = changes.into_iter().take(limit as usize).collect(); + + let new_cursor = changes.last().map(|c| c.sequence).unwrap_or(cursor); + + Ok(ChangesResponse { + changes, + cursor: new_cursor, + has_more, + }) +} + +/// Record a change in the sync log. +pub async fn record_change( + storage: &DynStorageBackend, + change_type: SyncChangeType, + path: &str, + media_id: Option, + content_hash: Option<&ContentHash>, + file_size: Option, + changed_by_device: Option, +) -> Result { + let entry = SyncLogEntry { + id: Uuid::now_v7(), + sequence: 0, // Will be assigned by database + change_type, + media_id, + path: path.to_string(), + content_hash: content_hash.cloned(), + file_size, + metadata_json: None, + changed_by_device, + timestamp: Utc::now(), + }; + + storage.record_sync_change(&entry).await?; + Ok(entry) +} + +/// Update device cursor after processing changes. +pub async fn update_device_cursor( + storage: &DynStorageBackend, + device_id: DeviceId, + cursor: i64, +) -> Result<()> { + let mut device = storage.get_device(device_id).await?; + device.sync_cursor = Some(cursor); + device.last_sync_at = Some(Utc::now()); + device.updated_at = Utc::now(); + storage.update_device(&device).await?; + Ok(()) +} + +/// Mark a file as synced for a device. 
+pub async fn mark_synced( + storage: &DynStorageBackend, + device_id: DeviceId, + path: &str, + hash: &str, + mtime: Option, +) -> Result<()> { + let state = DeviceSyncState { + device_id, + path: path.to_string(), + local_hash: Some(hash.to_string()), + server_hash: Some(hash.to_string()), + local_mtime: mtime, + server_mtime: mtime, + sync_status: FileSyncStatus::Synced, + last_synced_at: Some(Utc::now()), + conflict_info_json: None, + }; + + storage.upsert_device_sync_state(&state).await?; + Ok(()) +} + +/// Mark a file as pending download for a device. +pub async fn mark_pending_download( + storage: &DynStorageBackend, + device_id: DeviceId, + path: &str, + server_hash: &str, + server_mtime: Option, +) -> Result<()> { + // Get existing state or create new + let state = match storage.get_device_sync_state(device_id, path).await? { + Some(mut s) => { + s.server_hash = Some(server_hash.to_string()); + s.server_mtime = server_mtime; + s.sync_status = FileSyncStatus::PendingDownload; + s + } + None => DeviceSyncState { + device_id, + path: path.to_string(), + local_hash: None, + server_hash: Some(server_hash.to_string()), + local_mtime: None, + server_mtime, + sync_status: FileSyncStatus::PendingDownload, + last_synced_at: None, + conflict_info_json: None, + }, + }; + + storage.upsert_device_sync_state(&state).await?; + Ok(()) +} + +/// Generate a device token using UUIDs for randomness. +pub fn generate_device_token() -> String { + // Concatenate two UUIDs for 256 bits of randomness + let uuid1 = uuid::Uuid::new_v4(); + let uuid2 = uuid::Uuid::new_v4(); + format!("{}{}", uuid1.simple(), uuid2.simple()) +} + +/// Hash a device token for storage. +pub fn hash_device_token(token: &str) -> String { + blake3::hash(token.as_bytes()).to_hex().to_string() +} diff --git a/crates/pinakes-core/src/upload.rs b/crates/pinakes-core/src/upload.rs new file mode 100644 index 0000000..e02225e --- /dev/null +++ b/crates/pinakes-core/src/upload.rs @@ -0,0 +1,265 @@ +//! 
Upload processing for managed storage. +//! +//! Handles file uploads, metadata extraction, and MediaItem creation +//! for files stored in managed content-addressable storage. + +use std::collections::HashMap; +use std::path::Path; + +use chrono::Utc; +use tokio::io::AsyncRead; +use tracing::{debug, info}; + +use crate::error::{PinakesError, Result}; +use crate::managed_storage::ManagedStorageService; +use crate::media_type::MediaType; +use crate::metadata; +use crate::model::{MediaId, MediaItem, StorageMode, UploadResult}; +use crate::storage::DynStorageBackend; + +/// Process an upload from an async reader. +/// +/// This function: +/// 1. Stores the file in managed storage +/// 2. Checks for duplicates by content hash +/// 3. Extracts metadata from the file +/// 4. Creates or updates the MediaItem +pub async fn process_upload( + storage: &DynStorageBackend, + managed: &ManagedStorageService, + reader: R, + original_filename: &str, + mime_type: Option<&str>, +) -> Result { + // Store the file + let (content_hash, file_size) = managed.store_stream(reader).await?; + + // Check if we already have a media item with this hash + if let Some(existing) = storage.get_media_by_hash(&content_hash).await? 
{ + debug!(hash = %content_hash, media_id = %existing.id, "upload matched existing media item"); + return Ok(UploadResult { + media_id: existing.id, + content_hash, + was_duplicate: true, + file_size, + }); + } + + // Determine media type from filename + let media_type = MediaType::from_path(Path::new(original_filename)) + .unwrap_or_else(|| MediaType::custom("unknown")); + + // Get the actual file path in managed storage for metadata extraction + let blob_path = managed.path(&content_hash); + + // Extract metadata + let extracted = metadata::extract_metadata(&blob_path, media_type.clone()).ok(); + + // Create or get blob record + let mime = mime_type + .map(String::from) + .unwrap_or_else(|| media_type.mime_type().to_string()); + let _blob = storage + .get_or_create_blob(&content_hash, file_size, &mime) + .await?; + + // Create the media item + let now = Utc::now(); + let media_id = MediaId::new(); + + let item = MediaItem { + id: media_id, + path: blob_path, + file_name: sanitize_filename(original_filename), + media_type, + content_hash: content_hash.clone(), + file_size, + title: extracted.as_ref().and_then(|m| m.title.clone()), + artist: extracted.as_ref().and_then(|m| m.artist.clone()), + album: extracted.as_ref().and_then(|m| m.album.clone()), + genre: extracted.as_ref().and_then(|m| m.genre.clone()), + year: extracted.as_ref().and_then(|m| m.year), + duration_secs: extracted.as_ref().and_then(|m| m.duration_secs), + description: extracted.as_ref().and_then(|m| m.description.clone()), + thumbnail_path: None, + custom_fields: HashMap::new(), + file_mtime: None, + date_taken: extracted.as_ref().and_then(|m| m.date_taken), + latitude: extracted.as_ref().and_then(|m| m.latitude), + longitude: extracted.as_ref().and_then(|m| m.longitude), + camera_make: extracted.as_ref().and_then(|m| m.camera_make.clone()), + camera_model: extracted.as_ref().and_then(|m| m.camera_model.clone()), + rating: None, + perceptual_hash: None, + storage_mode: StorageMode::Managed, + 
original_filename: Some(original_filename.to_string()), + uploaded_at: Some(now), + storage_key: Some(content_hash.0.clone()), + created_at: now, + updated_at: now, + }; + + // Store the media item + storage.insert_managed_media(&item).await?; + + info!( + media_id = %media_id, + hash = %content_hash, + filename = %original_filename, + size = file_size, + "processed upload" + ); + + Ok(UploadResult { + media_id, + content_hash, + was_duplicate: false, + file_size, + }) +} + +/// Process an upload from bytes. +pub async fn process_upload_bytes( + storage: &DynStorageBackend, + managed: &ManagedStorageService, + data: &[u8], + original_filename: &str, + mime_type: Option<&str>, +) -> Result { + use std::io::Cursor; + let cursor = Cursor::new(data); + process_upload(storage, managed, cursor, original_filename, mime_type).await +} + +/// Process an upload from a local file path. +/// +/// This is useful for migrating existing external files to managed storage. +pub async fn process_upload_file( + storage: &DynStorageBackend, + managed: &ManagedStorageService, + path: &Path, + original_filename: Option<&str>, +) -> Result { + let file = tokio::fs::File::open(path).await?; + let reader = tokio::io::BufReader::new(file); + + let filename = original_filename.unwrap_or_else(|| { + path.file_name() + .and_then(|n| n.to_str()) + .unwrap_or("unknown") + }); + + let mime = mime_guess::from_path(path).first().map(|m| m.to_string()); + + process_upload(storage, managed, reader, filename, mime.as_deref()).await +} + +/// Migrate an existing external media item to managed storage. 
+pub async fn migrate_to_managed( + storage: &DynStorageBackend, + managed: &ManagedStorageService, + media_id: MediaId, +) -> Result<()> { + let item = storage.get_media(media_id).await?; + + if item.storage_mode == StorageMode::Managed { + return Err(PinakesError::InvalidOperation( + "media item is already in managed storage".into(), + )); + } + + // Check if the external file exists + if !item.path.exists() { + return Err(PinakesError::FileNotFound(item.path.clone())); + } + + // Store the file in managed storage + let (new_hash, new_size) = managed.store_file(&item.path).await?; + + // Verify the hash matches (it should, unless the file changed) + if new_hash.0 != item.content_hash.0 { + return Err(PinakesError::StorageIntegrity(format!( + "hash changed during migration: {} -> {}", + item.content_hash, new_hash + ))); + } + + // Get or create blob record + let mime = item.media_type.mime_type().to_string(); + let _blob = storage + .get_or_create_blob(&new_hash, new_size, &mime) + .await?; + + // Update the media item + let mut updated = item.clone(); + updated.storage_mode = StorageMode::Managed; + updated.storage_key = Some(new_hash.0.clone()); + updated.uploaded_at = Some(Utc::now()); + updated.path = managed.path(&new_hash); + updated.updated_at = Utc::now(); + + storage.update_media(&updated).await?; + + info!( + media_id = %media_id, + hash = %new_hash, + "migrated media item to managed storage" + ); + + Ok(()) +} + +/// Sanitize a filename for storage. +fn sanitize_filename(name: &str) -> String { + // Remove path separators and null bytes + name.replace(['/', '\\', '\0'], "_") + // Trim whitespace + .trim() + // Truncate to reasonable length + .chars() + .take(255) + .collect() +} + +/// Delete a managed media item and clean up the blob if orphaned. 
+pub async fn delete_managed_media( + storage: &DynStorageBackend, + managed: &ManagedStorageService, + media_id: MediaId, +) -> Result<()> { + let item = storage.get_media(media_id).await?; + + if item.storage_mode != StorageMode::Managed { + return Err(PinakesError::InvalidOperation( + "media item is not in managed storage".into(), + )); + } + + // Decrement blob reference count + let should_delete = storage.decrement_blob_ref(&item.content_hash).await?; + + // Delete the media item + storage.delete_media(media_id).await?; + + // If blob is orphaned, delete it from storage + if should_delete { + managed.delete(&item.content_hash).await?; + storage.delete_blob(&item.content_hash).await?; + info!(hash = %item.content_hash, "deleted orphaned blob"); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sanitize_filename() { + assert_eq!(sanitize_filename("test.txt"), "test.txt"); + assert_eq!(sanitize_filename("path/to/file.txt"), "path_to_file.txt"); + assert_eq!(sanitize_filename(" spaces "), "spaces"); + assert_eq!(sanitize_filename("a".repeat(300).as_str()), "a".repeat(255)); + } +} diff --git a/crates/pinakes-core/tests/integration_test.rs b/crates/pinakes-core/tests/integration_test.rs index a3d1ff8..576b4d3 100644 --- a/crates/pinakes-core/tests/integration_test.rs +++ b/crates/pinakes-core/tests/integration_test.rs @@ -43,6 +43,10 @@ async fn test_media_crud() { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, }; @@ -129,6 +133,10 @@ async fn test_tags() { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, }; @@ -189,6 +197,10 @@ async fn test_collections() { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: 
StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, }; @@ -244,6 +256,10 @@ async fn test_custom_fields() { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, }; @@ -318,6 +334,10 @@ async fn test_search() { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, }; @@ -457,6 +477,10 @@ async fn test_library_statistics_with_data() { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, }; @@ -501,6 +525,10 @@ fn make_test_media(hash: &str) -> MediaItem { camera_model: None, rating: None, perceptual_hash: None, + storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: now, updated_at: now, } diff --git a/crates/pinakes-core/tests/integrity_enhanced_test.rs b/crates/pinakes-core/tests/integrity_enhanced_test.rs index 57ba591..9f65a40 100644 --- a/crates/pinakes-core/tests/integrity_enhanced_test.rs +++ b/crates/pinakes-core/tests/integrity_enhanced_test.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use pinakes_core::integrity::detect_orphans; use pinakes_core::media_type::{BuiltinMediaType, MediaType}; -use pinakes_core::model::{ContentHash, MediaId, MediaItem}; +use pinakes_core::model::{ContentHash, MediaId, MediaItem, StorageMode}; use pinakes_core::storage::{DynStorageBackend, StorageBackend, sqlite::SqliteBackend}; use tempfile::TempDir; use uuid::Uuid; @@ -46,6 +46,10 @@ fn create_test_media_item(path: PathBuf, hash: &str) -> MediaItem { camera_model: None, rating: None, perceptual_hash: None, + 
storage_mode: StorageMode::External, + original_filename: None, + uploaded_at: None, + storage_key: None, created_at: chrono::Utc::now(), updated_at: chrono::Utc::now(), } diff --git a/crates/pinakes-server/Cargo.toml b/crates/pinakes-server/Cargo.toml index 9b1164c..7940e3f 100644 --- a/crates/pinakes-server/Cargo.toml +++ b/crates/pinakes-server/Cargo.toml @@ -26,6 +26,7 @@ governor = { workspace = true } tower_governor = { workspace = true } tokio-util = { version = "0.7", features = ["io"] } argon2 = { workspace = true } +blake3 = { workspace = true } rand = "0.9" percent-encoding = "2" http = "1.0" diff --git a/crates/pinakes-server/src/app.rs b/crates/pinakes-server/src/app.rs index 16de010..92ef13a 100644 --- a/crates/pinakes-server/src/app.rs +++ b/crates/pinakes-server/src/app.rs @@ -72,6 +72,8 @@ pub fn create_router_with_tls( // Public routes (no auth required) let public_routes = Router::new() .route("/s/{token}", get(routes::social::access_shared_media)) + // Enhanced sharing: public share access + .route("/shared/{token}", get(routes::shares::access_shared)) // Kubernetes-style health probes (no auth required for orchestration) .route("/health/live", get(routes::health::liveness)) .route("/health/ready", get(routes::health::readiness)); @@ -216,6 +218,25 @@ pub fn create_router_with_tls( .route( "/media/{id}/stream/dash/{profile}/{segment}", get(routes::streaming::dash_segment), + ) + // Managed storage (read) + .route("/media/{id}/download", get(routes::upload::download_file)) + .route("/managed/stats", get(routes::upload::managed_stats)) + // Sync (read) + .route("/sync/devices", get(routes::sync::list_devices)) + .route("/sync/devices/{id}", get(routes::sync::get_device)) + .route("/sync/changes", get(routes::sync::get_changes)) + .route("/sync/conflicts", get(routes::sync::list_conflicts)) + .route("/sync/upload/{id}", get(routes::sync::get_upload_status)) + .route("/sync/download/{*path}", get(routes::sync::download_file)) + // Enhanced sharing 
(read) + .route("/shares/outgoing", get(routes::shares::list_outgoing)) + .route("/shares/incoming", get(routes::shares::list_incoming)) + .route("/shares/{id}", get(routes::shares::get_share)) + .route("/shares/{id}/activity", get(routes::shares::get_activity)) + .route( + "/notifications/shares", + get(routes::shares::get_notifications), ); // Write routes: Editor+ required @@ -371,6 +392,49 @@ pub fn create_router_with_tls( post(routes::transcode::start_transcode), ) .route("/transcode/{id}", delete(routes::transcode::cancel_session)) + // Managed storage (write) + .route("/upload", post(routes::upload::upload_file)) + .route( + "/media/{id}/move-to-managed", + post(routes::upload::move_to_managed), + ) + // Sync (write) + .route("/sync/devices", post(routes::sync::register_device)) + .route("/sync/devices/{id}", put(routes::sync::update_device)) + .route("/sync/devices/{id}", delete(routes::sync::delete_device)) + .route( + "/sync/devices/{id}/token", + post(routes::sync::regenerate_token), + ) + .route("/sync/report", post(routes::sync::report_changes)) + .route("/sync/ack", post(routes::sync::acknowledge_changes)) + .route( + "/sync/conflicts/{id}/resolve", + post(routes::sync::resolve_conflict), + ) + .route("/sync/upload", post(routes::sync::create_upload)) + .route( + "/sync/upload/{id}/chunks/{index}", + put(routes::sync::upload_chunk), + ) + .route( + "/sync/upload/{id}/complete", + post(routes::sync::complete_upload), + ) + .route("/sync/upload/{id}", delete(routes::sync::cancel_upload)) + // Enhanced sharing (write) + .route("/shares", post(routes::shares::create_share)) + .route("/shares/{id}", patch(routes::shares::update_share)) + .route("/shares/{id}", delete(routes::shares::delete_share)) + .route("/shares/batch/delete", post(routes::shares::batch_delete)) + .route( + "/notifications/shares/{id}/read", + post(routes::shares::mark_notification_read), + ) + .route( + "/notifications/shares/read-all", + post(routes::shares::mark_all_read), + ) 
.layer(middleware::from_fn(auth::require_editor)); // Admin-only routes: destructive/config operations diff --git a/crates/pinakes-server/src/dto.rs b/crates/pinakes-server/src/dto.rs index 4e777dc..54847ec 100644 --- a/crates/pinakes-server/src/dto.rs +++ b/crates/pinakes-server/src/dto.rs @@ -997,3 +997,418 @@ impl From for TranscodeSessionRespons pub struct CreateTranscodeRequest { pub profile: String, } + +// ===== Managed Storage / Upload ===== + +#[derive(Debug, Serialize)] +pub struct UploadResponse { + pub media_id: String, + pub content_hash: String, + pub was_duplicate: bool, + pub file_size: u64, +} + +impl From for UploadResponse { + fn from(result: pinakes_core::model::UploadResult) -> Self { + Self { + media_id: result.media_id.0.to_string(), + content_hash: result.content_hash.0, + was_duplicate: result.was_duplicate, + file_size: result.file_size, + } + } +} + +#[derive(Debug, Serialize)] +pub struct ManagedStorageStatsResponse { + pub total_blobs: u64, + pub total_size_bytes: u64, + pub orphaned_blobs: u64, + pub deduplication_ratio: f64, +} + +impl From for ManagedStorageStatsResponse { + fn from(stats: pinakes_core::model::ManagedStorageStats) -> Self { + Self { + total_blobs: stats.total_blobs, + total_size_bytes: stats.total_size_bytes, + orphaned_blobs: stats.orphaned_blobs, + deduplication_ratio: stats.deduplication_ratio, + } + } +} + +// ===== Sync ===== + +#[derive(Debug, Deserialize)] +pub struct RegisterDeviceRequest { + pub name: String, + pub device_type: String, + pub client_version: String, + pub os_info: Option, +} + +#[derive(Debug, Serialize)] +pub struct DeviceResponse { + pub id: String, + pub name: String, + pub device_type: String, + pub client_version: String, + pub os_info: Option, + pub last_sync_at: Option>, + pub last_seen_at: DateTime, + pub sync_cursor: Option, + pub enabled: bool, + pub created_at: DateTime, +} + +impl From for DeviceResponse { + fn from(d: pinakes_core::sync::SyncDevice) -> Self { + Self { + id: 
d.id.0.to_string(), + name: d.name, + device_type: d.device_type.to_string(), + client_version: d.client_version, + os_info: d.os_info, + last_sync_at: d.last_sync_at, + last_seen_at: d.last_seen_at, + sync_cursor: d.sync_cursor, + enabled: d.enabled, + created_at: d.created_at, + } + } +} + +#[derive(Debug, Serialize)] +pub struct DeviceRegistrationResponse { + pub device: DeviceResponse, + pub device_token: String, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateDeviceRequest { + pub name: Option, + pub enabled: Option, +} + +#[derive(Debug, Deserialize)] +pub struct GetChangesParams { + pub cursor: Option, + pub limit: Option, +} + +#[derive(Debug, Serialize)] +pub struct SyncChangeResponse { + pub id: String, + pub sequence: i64, + pub change_type: String, + pub media_id: Option, + pub path: String, + pub content_hash: Option, + pub file_size: Option, + pub timestamp: DateTime, +} + +impl From for SyncChangeResponse { + fn from(e: pinakes_core::sync::SyncLogEntry) -> Self { + Self { + id: e.id.to_string(), + sequence: e.sequence, + change_type: e.change_type.to_string(), + media_id: e.media_id.map(|id| id.0.to_string()), + path: e.path, + content_hash: e.content_hash.map(|h| h.0), + file_size: e.file_size, + timestamp: e.timestamp, + } + } +} + +#[derive(Debug, Serialize)] +pub struct ChangesResponse { + pub changes: Vec, + pub cursor: i64, + pub has_more: bool, +} + +#[derive(Debug, Deserialize)] +pub struct ClientChangeReport { + pub path: String, + pub change_type: String, + pub content_hash: Option, + pub file_size: Option, + pub local_mtime: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ReportChangesRequest { + pub changes: Vec, +} + +#[derive(Debug, Serialize)] +pub struct ReportChangesResponse { + pub accepted: Vec, + pub conflicts: Vec, + pub upload_required: Vec, +} + +#[derive(Debug, Serialize)] +pub struct ConflictResponse { + pub id: String, + pub path: String, + pub local_hash: String, + pub server_hash: String, + pub detected_at: 
DateTime, +} + +impl From for ConflictResponse { + fn from(c: pinakes_core::sync::SyncConflict) -> Self { + Self { + id: c.id.to_string(), + path: c.path, + local_hash: c.local_hash, + server_hash: c.server_hash, + detected_at: c.detected_at, + } + } +} + +#[derive(Debug, Deserialize)] +pub struct ResolveConflictRequest { + pub resolution: String, +} + +#[derive(Debug, Deserialize)] +pub struct CreateUploadSessionRequest { + pub target_path: String, + pub expected_hash: String, + pub expected_size: u64, + pub chunk_size: Option, +} + +#[derive(Debug, Serialize)] +pub struct UploadSessionResponse { + pub id: String, + pub target_path: String, + pub expected_hash: String, + pub expected_size: u64, + pub chunk_size: u64, + pub chunk_count: u64, + pub status: String, + pub created_at: DateTime, + pub expires_at: DateTime, +} + +impl From for UploadSessionResponse { + fn from(s: pinakes_core::sync::UploadSession) -> Self { + Self { + id: s.id.to_string(), + target_path: s.target_path, + expected_hash: s.expected_hash.0, + expected_size: s.expected_size, + chunk_size: s.chunk_size, + chunk_count: s.chunk_count, + status: s.status.to_string(), + created_at: s.created_at, + expires_at: s.expires_at, + } + } +} + +#[derive(Debug, Serialize)] +pub struct ChunkUploadedResponse { + pub chunk_index: u64, + pub received: bool, +} + +#[derive(Debug, Deserialize)] +pub struct AcknowledgeChangesRequest { + pub cursor: i64, +} + +// ===== Enhanced Sharing ===== + +#[derive(Debug, Deserialize)] +pub struct CreateShareRequest { + pub target_type: String, + pub target_id: String, + pub recipient_type: String, + pub recipient_user_id: Option, + pub recipient_group_id: Option, + pub password: Option, + pub permissions: Option, + pub note: Option, + pub expires_in_hours: Option, + pub inherit_to_children: Option, +} + +#[derive(Debug, Deserialize)] +pub struct SharePermissionsRequest { + pub can_view: Option, + pub can_download: Option, + pub can_edit: Option, + pub can_delete: Option, + 
pub can_reshare: Option, + pub can_add: Option, +} + +#[derive(Debug, Serialize)] +pub struct ShareResponse { + pub id: String, + pub target_type: String, + pub target_id: String, + pub owner_id: String, + pub recipient_type: String, + pub recipient_user_id: Option, + pub recipient_group_id: Option, + pub public_token: Option, + pub permissions: SharePermissionsResponse, + pub note: Option, + pub expires_at: Option>, + pub access_count: u64, + pub last_accessed: Option>, + pub inherit_to_children: bool, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +#[derive(Debug, Serialize)] +pub struct SharePermissionsResponse { + pub can_view: bool, + pub can_download: bool, + pub can_edit: bool, + pub can_delete: bool, + pub can_reshare: bool, + pub can_add: bool, +} + +impl From for SharePermissionsResponse { + fn from(p: pinakes_core::sharing::SharePermissions) -> Self { + Self { + can_view: p.can_view, + can_download: p.can_download, + can_edit: p.can_edit, + can_delete: p.can_delete, + can_reshare: p.can_reshare, + can_add: p.can_add, + } + } +} + +impl From for ShareResponse { + fn from(s: pinakes_core::sharing::Share) -> Self { + let (target_type, target_id) = match &s.target { + pinakes_core::sharing::ShareTarget::Media { media_id } => { + ("media".to_string(), media_id.0.to_string()) + } + pinakes_core::sharing::ShareTarget::Collection { collection_id } => { + ("collection".to_string(), collection_id.to_string()) + } + pinakes_core::sharing::ShareTarget::Tag { tag_id } => { + ("tag".to_string(), tag_id.to_string()) + } + pinakes_core::sharing::ShareTarget::SavedSearch { search_id } => { + ("saved_search".to_string(), search_id.to_string()) + } + }; + + let (recipient_type, recipient_user_id, recipient_group_id, public_token) = + match &s.recipient { + pinakes_core::sharing::ShareRecipient::PublicLink { token, .. 
} => { + ("public_link".to_string(), None, None, Some(token.clone())) + } + pinakes_core::sharing::ShareRecipient::User { user_id } => { + ("user".to_string(), Some(user_id.0.to_string()), None, None) + } + pinakes_core::sharing::ShareRecipient::Group { group_id } => { + ("group".to_string(), None, Some(group_id.to_string()), None) + } + pinakes_core::sharing::ShareRecipient::Federated { .. } => { + ("federated".to_string(), None, None, None) + } + }; + + Self { + id: s.id.0.to_string(), + target_type, + target_id, + owner_id: s.owner_id.0.to_string(), + recipient_type, + recipient_user_id, + recipient_group_id, + public_token, + permissions: s.permissions.into(), + note: s.note, + expires_at: s.expires_at, + access_count: s.access_count, + last_accessed: s.last_accessed, + inherit_to_children: s.inherit_to_children, + created_at: s.created_at, + updated_at: s.updated_at, + } + } +} + +#[derive(Debug, Deserialize)] +pub struct UpdateShareRequest { + pub permissions: Option, + pub note: Option, + pub expires_at: Option>, + pub inherit_to_children: Option, +} + +#[derive(Debug, Serialize)] +pub struct ShareActivityResponse { + pub id: String, + pub share_id: String, + pub actor_id: Option, + pub actor_ip: Option, + pub action: String, + pub details: Option, + pub timestamp: DateTime, +} + +impl From for ShareActivityResponse { + fn from(a: pinakes_core::sharing::ShareActivity) -> Self { + Self { + id: a.id.to_string(), + share_id: a.share_id.0.to_string(), + actor_id: a.actor_id.map(|id| id.0.to_string()), + actor_ip: a.actor_ip, + action: a.action.to_string(), + details: a.details, + timestamp: a.timestamp, + } + } +} + +#[derive(Debug, Serialize)] +pub struct ShareNotificationResponse { + pub id: String, + pub share_id: String, + pub notification_type: String, + pub is_read: bool, + pub created_at: DateTime, +} + +impl From for ShareNotificationResponse { + fn from(n: pinakes_core::sharing::ShareNotification) -> Self { + Self { + id: n.id.to_string(), + share_id: 
n.share_id.0.to_string(), + notification_type: n.notification_type.to_string(), + is_read: n.is_read, + created_at: n.created_at, + } + } +} + +#[derive(Debug, Deserialize)] +pub struct BatchDeleteSharesRequest { + pub share_ids: Vec, +} + +#[derive(Debug, Deserialize)] +pub struct AccessSharedRequest { + pub password: Option, +} diff --git a/crates/pinakes-server/src/error.rs b/crates/pinakes-server/src/error.rs index c2277c6..eeade45 100644 --- a/crates/pinakes-server/src/error.rs +++ b/crates/pinakes-server/src/error.rs @@ -69,3 +69,31 @@ impl From for ApiError { Self(e) } } + +impl ApiError { + pub fn bad_request(msg: impl Into) -> Self { + Self(pinakes_core::error::PinakesError::InvalidOperation( + msg.into(), + )) + } + + pub fn not_found(msg: impl Into) -> Self { + Self(pinakes_core::error::PinakesError::NotFound(msg.into())) + } + + pub fn internal(msg: impl Into) -> Self { + Self(pinakes_core::error::PinakesError::Database(msg.into())) + } + + pub fn forbidden(msg: impl Into) -> Self { + Self(pinakes_core::error::PinakesError::Authorization(msg.into())) + } + + pub fn unauthorized(msg: impl Into) -> Self { + Self(pinakes_core::error::PinakesError::Authentication( + msg.into(), + )) + } +} + +pub type ApiResult = Result; diff --git a/crates/pinakes-server/src/main.rs b/crates/pinakes-server/src/main.rs index 128dfc5..e0ba14b 100644 --- a/crates/pinakes-server/src/main.rs +++ b/crates/pinakes-server/src/main.rs @@ -45,17 +45,20 @@ struct Cli { migrate_only: bool, } -fn resolve_config_path(explicit: Option<&std::path::Path>) -> PathBuf { +/// Resolve the configuration file path. +/// Returns (path, was_explicit) where was_explicit indicates if the path was +/// explicitly provided by the user (vs discovered). 
+fn resolve_config_path(explicit: Option<&std::path::Path>) -> (PathBuf, bool) { if let Some(path) = explicit { - return path.to_path_buf(); + return (path.to_path_buf(), true); } // Check current directory let local = PathBuf::from("pinakes.toml"); if local.exists() { - return local; + return (local, false); } // XDG default - Config::default_config_path() + (Config::default_config_path(), false) } #[tokio::main] @@ -89,11 +92,17 @@ async fn main() -> Result<()> { } } - let config_path = resolve_config_path(cli.config.as_deref()); + let (config_path, was_explicit) = resolve_config_path(cli.config.as_deref()); let mut config = if config_path.exists() { info!(path = %config_path.display(), "loading configuration from file"); Config::from_file(&config_path)? + } else if was_explicit { + // User explicitly provided a config path that doesn't exist - this is an error + return Err(anyhow::anyhow!( + "configuration file not found: {}", + config_path.display() + )); } else { info!( "using default configuration (no config file found at {})", @@ -486,6 +495,34 @@ async fn main() -> Result<()> { }); } + // Initialize managed storage service if enabled + let managed_storage = { + let config_read = config_arc.read().await; + if config_read.managed_storage.enabled { + let service = pinakes_core::managed_storage::ManagedStorageService::new( + config_read.managed_storage.storage_dir.clone(), + config_read.managed_storage.max_upload_size, + config_read.managed_storage.verify_on_read, + ); + match service.init().await { + Ok(()) => { + info!( + path = %config_read.managed_storage.storage_dir.display(), + "managed storage initialized" + ); + Some(Arc::new(service)) + } + Err(e) => { + tracing::error!(error = %e, "failed to initialize managed storage"); + None + } + } + } else { + tracing::info!("managed storage disabled in configuration"); + None + } + }; + let state = AppState { storage: storage.clone(), config: config_arc.clone(), @@ -496,6 +533,7 @@ async fn main() -> Result<()> 
{ scheduler, plugin_manager, transcode_service, + managed_storage, }; // Periodic session cleanup (every 15 minutes) diff --git a/crates/pinakes-server/src/routes/books.rs b/crates/pinakes-server/src/routes/books.rs index fa85070..8337d6c 100644 --- a/crates/pinakes-server/src/routes/books.rs +++ b/crates/pinakes-server/src/routes/books.rs @@ -1,6 +1,6 @@ use axum::{ Json, Router, - extract::{Path, Query, State}, + extract::{Extension, Path, Query, State}, http::StatusCode, response::IntoResponse, routing::{get, put}, @@ -13,7 +13,7 @@ use pinakes_core::{ model::{AuthorInfo, BookMetadata, MediaId, Pagination, ReadingProgress, ReadingStatus}, }; -use crate::{dto::MediaResponse, error::ApiError, state::AppState}; +use crate::{auth::resolve_user_id, dto::MediaResponse, error::ApiError, state::AppState}; /// Book metadata response DTO #[derive(Debug, Serialize, Deserialize)] @@ -240,15 +240,15 @@ pub async fn get_author_books( /// Get reading progress for a book pub async fn get_reading_progress( State(state): State, + Extension(username): Extension, Path(media_id): Path, ) -> Result { - // TODO: Get user_id from auth context - let user_id = Uuid::new_v4(); // Placeholder + let user_id = resolve_user_id(&state.storage, &username).await?; let media_id = MediaId(media_id); let progress = state .storage - .get_reading_progress(user_id, media_id) + .get_reading_progress(user_id.0, media_id) .await? 
.ok_or(ApiError(PinakesError::NotFound( "Reading progress not found".to_string(), @@ -260,16 +260,16 @@ pub async fn get_reading_progress( /// Update reading progress for a book pub async fn update_reading_progress( State(state): State, + Extension(username): Extension, Path(media_id): Path, Json(req): Json, ) -> Result { - // TODO: Get user_id from auth context - let user_id = Uuid::new_v4(); // Placeholder + let user_id = resolve_user_id(&state.storage, &username).await?; let media_id = MediaId(media_id); state .storage - .update_reading_progress(user_id, media_id, req.current_page) + .update_reading_progress(user_id.0, media_id, req.current_page) .await?; Ok(StatusCode::NO_CONTENT) @@ -278,14 +278,14 @@ pub async fn update_reading_progress( /// Get user's reading list pub async fn get_reading_list( State(state): State, + Extension(username): Extension, Query(params): Query, ) -> Result { - // TODO: Get user_id from auth context - let user_id = Uuid::new_v4(); // Placeholder + let user_id = resolve_user_id(&state.storage, &username).await?; let items = state .storage - .get_reading_list(user_id, params.status) + .get_reading_list(user_id.0, params.status) .await?; let response: Vec = items.into_iter().map(MediaResponse::from).collect(); diff --git a/crates/pinakes-server/src/routes/mod.rs b/crates/pinakes-server/src/routes/mod.rs index 969c9b4..942a289 100644 --- a/crates/pinakes-server/src/routes/mod.rs +++ b/crates/pinakes-server/src/routes/mod.rs @@ -19,11 +19,14 @@ pub mod saved_searches; pub mod scan; pub mod scheduled_tasks; pub mod search; +pub mod shares; pub mod social; pub mod statistics; pub mod streaming; pub mod subtitles; +pub mod sync; pub mod tags; pub mod transcode; +pub mod upload; pub mod users; pub mod webhooks; diff --git a/crates/pinakes-server/src/routes/photos.rs b/crates/pinakes-server/src/routes/photos.rs index 3b0abd2..b078527 100644 --- a/crates/pinakes-server/src/routes/photos.rs +++ b/crates/pinakes-server/src/routes/photos.rs @@ 
-27,6 +27,12 @@ pub struct TimelineQuery { pub group_by: GroupBy, pub year: Option, pub month: Option, + #[serde(default = "default_timeline_limit")] + pub limit: u64, +} + +fn default_timeline_limit() -> u64 { + 10000 } /// Timeline group response @@ -62,12 +68,12 @@ pub async fn get_timeline( State(state): State, Query(query): Query, ) -> Result { - // Query photos with date_taken + // Query photos with date_taken (limit is configurable, defaults to 10000) let all_media = state .storage .list_media(&pinakes_core::model::Pagination { offset: 0, - limit: 10000, // TODO: Make this more efficient with streaming + limit: query.limit.min(50000), // Cap at 50000 for safety sort: Some("date_taken DESC".to_string()), }) .await?; diff --git a/crates/pinakes-server/src/routes/shares.rs b/crates/pinakes-server/src/routes/shares.rs new file mode 100644 index 0000000..58ad378 --- /dev/null +++ b/crates/pinakes-server/src/routes/shares.rs @@ -0,0 +1,543 @@ +use axum::{ + Json, + extract::{ConnectInfo, Extension, Path, Query, State}, + http::StatusCode, +}; +use chrono::Utc; +use std::net::SocketAddr; +use uuid::Uuid; + +use crate::auth::resolve_user_id; +use crate::dto::{ + AccessSharedRequest, BatchDeleteSharesRequest, CreateShareRequest, MediaResponse, + PaginationParams, ShareActivityResponse, ShareNotificationResponse, ShareResponse, + UpdateShareRequest, +}; +use crate::error::{ApiError, ApiResult}; +use crate::state::AppState; +use pinakes_core::model::MediaId; +use pinakes_core::model::Pagination; +use pinakes_core::sharing::{ + Share, ShareActivity, ShareActivityAction, ShareId, ShareNotification, ShareNotificationType, + SharePermissions, ShareRecipient, ShareTarget, generate_share_token, hash_share_password, + verify_share_password, +}; +use pinakes_core::users::UserId; + +/// Create a new share +/// POST /api/shares +pub async fn create_share( + State(state): State, + Extension(username): Extension, + Json(req): Json, +) -> ApiResult> { + let config = 
state.config.read().await; + if !config.sharing.enabled { + return Err(ApiError::bad_request("Sharing is not enabled")); + } + + // Validate public links are allowed + if req.recipient_type == "public_link" && !config.sharing.allow_public_links { + return Err(ApiError::bad_request("Public links are not allowed")); + } + drop(config); + + let owner_id = resolve_user_id(&state.storage, &username).await?; + + // Parse target + let target_id: Uuid = req + .target_id + .parse() + .map_err(|_| ApiError::bad_request("Invalid target_id"))?; + + let target = match req.target_type.as_str() { + "media" => ShareTarget::Media { + media_id: MediaId(target_id), + }, + "collection" => ShareTarget::Collection { + collection_id: target_id, + }, + "tag" => ShareTarget::Tag { tag_id: target_id }, + "saved_search" => ShareTarget::SavedSearch { + search_id: target_id, + }, + _ => return Err(ApiError::bad_request("Invalid target_type")), + }; + + // Parse recipient + let recipient = match req.recipient_type.as_str() { + "public_link" => { + let token = generate_share_token(); + let password_hash = req.password.as_ref().map(|p| hash_share_password(p)); + ShareRecipient::PublicLink { + token, + password_hash, + } + } + "user" => { + let recipient_user_id = req.recipient_user_id.ok_or_else(|| { + ApiError::bad_request("recipient_user_id required for user share") + })?; + ShareRecipient::User { + user_id: UserId(recipient_user_id), + } + } + "group" => { + let group_id = req.recipient_group_id.ok_or_else(|| { + ApiError::bad_request("recipient_group_id required for group share") + })?; + ShareRecipient::Group { group_id } + } + _ => return Err(ApiError::bad_request("Invalid recipient_type")), + }; + + // Parse permissions + let permissions = if let Some(perms) = req.permissions { + SharePermissions { + can_view: perms.can_view.unwrap_or(true), + can_download: perms.can_download.unwrap_or(false), + can_edit: perms.can_edit.unwrap_or(false), + can_delete: perms.can_delete.unwrap_or(false), + 
can_reshare: perms.can_reshare.unwrap_or(false), + can_add: perms.can_add.unwrap_or(false), + } + } else { + SharePermissions::view_only() + }; + + // Calculate expiration + let expires_at = req + .expires_in_hours + .map(|hours| Utc::now() + chrono::Duration::hours(hours as i64)); + + let share = Share { + id: ShareId(Uuid::now_v7()), + target, + owner_id, + recipient, + permissions, + note: req.note, + expires_at, + access_count: 0, + last_accessed: None, + inherit_to_children: req.inherit_to_children.unwrap_or(true), + parent_share_id: None, + created_at: Utc::now(), + updated_at: Utc::now(), + }; + + let created = state + .storage + .create_share(&share) + .await + .map_err(|e| ApiError::internal(format!("Failed to create share: {}", e)))?; + + // Send notification to recipient if it's a user share + if let ShareRecipient::User { user_id } = &created.recipient { + let notification = ShareNotification { + id: Uuid::now_v7(), + user_id: *user_id, + share_id: created.id, + notification_type: ShareNotificationType::NewShare, + is_read: false, + created_at: Utc::now(), + }; + + // Ignore notification errors + let _ = state.storage.create_share_notification(¬ification).await; + } + + Ok(Json(created.into())) +} + +/// List outgoing shares (shares I created) +/// GET /api/shares/outgoing +pub async fn list_outgoing( + State(state): State, + Extension(username): Extension, + Query(params): Query, +) -> ApiResult>> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let pagination = Pagination { + offset: params.offset.unwrap_or(0), + limit: params.limit.unwrap_or(50), + sort: params.sort, + }; + + let shares = state + .storage + .list_shares_by_owner(user_id, &pagination) + .await + .map_err(|e| ApiError::internal(format!("Failed to list shares: {}", e)))?; + + Ok(Json(shares.into_iter().map(Into::into).collect())) +} + +/// List incoming shares (shares shared with me) +/// GET /api/shares/incoming +pub async fn list_incoming( + State(state): State, 
+ Extension(username): Extension, + Query(params): Query, +) -> ApiResult>> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let pagination = Pagination { + offset: params.offset.unwrap_or(0), + limit: params.limit.unwrap_or(50), + sort: params.sort, + }; + + let shares = state + .storage + .list_shares_for_user(user_id, &pagination) + .await + .map_err(|e| ApiError::internal(format!("Failed to list shares: {}", e)))?; + + Ok(Json(shares.into_iter().map(Into::into).collect())) +} + +/// Get share details +/// GET /api/shares/{id} +pub async fn get_share( + State(state): State, + Extension(username): Extension, + Path(id): Path, +) -> ApiResult> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let share = state + .storage + .get_share(ShareId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?; + + // Check authorization + let is_owner = share.owner_id == user_id; + let is_recipient = match &share.recipient { + ShareRecipient::User { + user_id: recipient_id, + } => *recipient_id == user_id, + _ => false, + }; + + if !is_owner && !is_recipient { + return Err(ApiError::forbidden("Not authorized to view this share")); + } + + Ok(Json(share.into())) +} + +/// Update a share +/// PATCH /api/shares/{id} +pub async fn update_share( + State(state): State, + Extension(username): Extension, + Path(id): Path, + Json(req): Json, +) -> ApiResult> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let mut share = state + .storage + .get_share(ShareId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?; + + // Only owner can update + if share.owner_id != user_id { + return Err(ApiError::forbidden("Only the owner can update this share")); + } + + // Update fields + if let Some(perms) = req.permissions { + share.permissions = SharePermissions { + can_view: perms.can_view.unwrap_or(share.permissions.can_view), + can_download: 
perms.can_download.unwrap_or(share.permissions.can_download), + can_edit: perms.can_edit.unwrap_or(share.permissions.can_edit), + can_delete: perms.can_delete.unwrap_or(share.permissions.can_delete), + can_reshare: perms.can_reshare.unwrap_or(share.permissions.can_reshare), + can_add: perms.can_add.unwrap_or(share.permissions.can_add), + }; + } + + if let Some(note) = req.note { + share.note = Some(note); + } + + if let Some(expires_at) = req.expires_at { + share.expires_at = Some(expires_at); + } + + if let Some(inherit) = req.inherit_to_children { + share.inherit_to_children = inherit; + } + + share.updated_at = Utc::now(); + + let updated = state + .storage + .update_share(&share) + .await + .map_err(|e| ApiError::internal(format!("Failed to update share: {}", e)))?; + + // Notify recipient of update + if let ShareRecipient::User { user_id } = &updated.recipient { + let notification = ShareNotification { + id: Uuid::now_v7(), + user_id: *user_id, + share_id: updated.id, + notification_type: ShareNotificationType::ShareUpdated, + is_read: false, + created_at: Utc::now(), + }; + let _ = state.storage.create_share_notification(¬ification).await; + } + + Ok(Json(updated.into())) +} + +/// Delete (revoke) a share +/// DELETE /api/shares/{id} +pub async fn delete_share( + State(state): State, + Extension(username): Extension, + Path(id): Path, +) -> ApiResult { + let user_id = resolve_user_id(&state.storage, &username).await?; + let share = state + .storage + .get_share(ShareId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?; + + // Only owner can delete + if share.owner_id != user_id { + return Err(ApiError::forbidden("Only the owner can revoke this share")); + } + + // Notify recipient before deletion + if let ShareRecipient::User { user_id } = &share.recipient { + let notification = ShareNotification { + id: Uuid::now_v7(), + user_id: *user_id, + share_id: share.id, + notification_type: ShareNotificationType::ShareRevoked, + 
is_read: false, + created_at: Utc::now(), + }; + let _ = state.storage.create_share_notification(¬ification).await; + } + + state + .storage + .delete_share(ShareId(id)) + .await + .map_err(|e| ApiError::internal(format!("Failed to delete share: {}", e)))?; + + Ok(StatusCode::NO_CONTENT) +} + +/// Batch delete shares +/// POST /api/shares/batch/delete +pub async fn batch_delete( + State(state): State, + Extension(username): Extension, + Json(req): Json, +) -> ApiResult> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let share_ids: Vec = req.share_ids.into_iter().map(ShareId).collect(); + + // Verify ownership of all shares + for share_id in &share_ids { + let share = state + .storage + .get_share(*share_id) + .await + .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?; + + if share.owner_id != user_id { + return Err(ApiError::forbidden(format!( + "Not authorized to delete share {}", + share_id.0 + ))); + } + } + + let deleted = state + .storage + .batch_delete_shares(&share_ids) + .await + .map_err(|e| ApiError::internal(format!("Failed to batch delete: {}", e)))?; + + Ok(Json(serde_json::json!({ "deleted": deleted }))) +} + +/// Access a public shared resource +/// GET /api/shared/{token} +pub async fn access_shared( + State(state): State, + Path(token): Path, + Query(params): Query, + ConnectInfo(addr): ConnectInfo, +) -> ApiResult> { + let share = state + .storage + .get_share_by_token(&token) + .await + .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?; + + // Check expiration + if let Some(expires_at) = share.expires_at { + if Utc::now() > expires_at { + return Err(ApiError::not_found("Share has expired")); + } + } + + // Check password if required + if let ShareRecipient::PublicLink { password_hash, .. 
} = &share.recipient { + if let Some(hash) = password_hash { + let provided_password = params + .password + .as_ref() + .ok_or_else(|| ApiError::unauthorized("Password required"))?; + + if !verify_share_password(provided_password, hash) { + // Log failed attempt + let activity = ShareActivity { + id: Uuid::now_v7(), + share_id: share.id, + actor_id: None, + actor_ip: Some(addr.ip().to_string()), + action: ShareActivityAction::PasswordFailed, + details: None, + timestamp: Utc::now(), + }; + let _ = state.storage.record_share_activity(&activity).await; + + return Err(ApiError::unauthorized("Invalid password")); + } + } + } + + // Record access + state + .storage + .record_share_access(share.id) + .await + .map_err(|e| ApiError::internal(format!("Failed to record access: {}", e)))?; + + // Log the access + let activity = ShareActivity { + id: Uuid::now_v7(), + share_id: share.id, + actor_id: None, + actor_ip: Some(addr.ip().to_string()), + action: ShareActivityAction::Accessed, + details: None, + timestamp: Utc::now(), + }; + let _ = state.storage.record_share_activity(&activity).await; + + // Return the shared content + match &share.target { + ShareTarget::Media { media_id } => { + let item = state + .storage + .get_media(*media_id) + .await + .map_err(|e| ApiError::not_found(format!("Media not found: {}", e)))?; + + Ok(Json(item.into())) + } + _ => { + // For collections/tags, return a placeholder + // Full implementation would return the collection contents + Err(ApiError::bad_request( + "Collection/tag sharing not yet fully implemented", + )) + } + } +} + +/// Get share activity log +/// GET /api/shares/{id}/activity +pub async fn get_activity( + State(state): State, + Extension(username): Extension, + Path(id): Path, + Query(params): Query, +) -> ApiResult>> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let share = state + .storage + .get_share(ShareId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?; 
+ + // Only owner can view activity + if share.owner_id != user_id { + return Err(ApiError::forbidden( + "Only the owner can view share activity", + )); + } + + let pagination = Pagination { + offset: params.offset.unwrap_or(0), + limit: params.limit.unwrap_or(50), + sort: params.sort, + }; + + let activity = state + .storage + .get_share_activity(ShareId(id), &pagination) + .await + .map_err(|e| ApiError::internal(format!("Failed to get activity: {}", e)))?; + + Ok(Json(activity.into_iter().map(Into::into).collect())) +} + +/// Get unread share notifications +/// GET /api/notifications/shares +pub async fn get_notifications( + State(state): State, + Extension(username): Extension, +) -> ApiResult>> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let notifications = state + .storage + .get_unread_notifications(user_id) + .await + .map_err(|e| ApiError::internal(format!("Failed to get notifications: {}", e)))?; + + Ok(Json(notifications.into_iter().map(Into::into).collect())) +} + +/// Mark a notification as read +/// POST /api/notifications/shares/{id}/read +pub async fn mark_notification_read( + State(state): State, + Extension(_username): Extension, + Path(id): Path, +) -> ApiResult { + state + .storage + .mark_notification_read(id) + .await + .map_err(|e| ApiError::internal(format!("Failed to mark as read: {}", e)))?; + + Ok(StatusCode::OK) +} + +/// Mark all notifications as read +/// POST /api/notifications/shares/read-all +pub async fn mark_all_read( + State(state): State, + Extension(username): Extension, +) -> ApiResult { + let user_id = resolve_user_id(&state.storage, &username).await?; + state + .storage + .mark_all_notifications_read(user_id) + .await + .map_err(|e| ApiError::internal(format!("Failed to mark all as read: {}", e)))?; + + Ok(StatusCode::OK) +} diff --git a/crates/pinakes-server/src/routes/sync.rs b/crates/pinakes-server/src/routes/sync.rs new file mode 100644 index 0000000..620c6a7 --- /dev/null +++ 
b/crates/pinakes-server/src/routes/sync.rs @@ -0,0 +1,743 @@ +use axum::{ + Json, + body::Body, + extract::{Extension, Path, Query, State}, + http::{HeaderMap, StatusCode, header}, + response::IntoResponse, +}; +use chrono::Utc; +use tokio_util::io::ReaderStream; +use uuid::Uuid; + +use crate::auth::resolve_user_id; +use crate::dto::{ + AcknowledgeChangesRequest, ChangesResponse, ChunkUploadedResponse, ConflictResponse, + CreateUploadSessionRequest, DeviceRegistrationResponse, DeviceResponse, GetChangesParams, + RegisterDeviceRequest, ReportChangesRequest, ReportChangesResponse, ResolveConflictRequest, + SyncChangeResponse, UpdateDeviceRequest, UploadSessionResponse, +}; +use crate::error::{ApiError, ApiResult}; +use crate::state::AppState; +use pinakes_core::config::ConflictResolution; +use pinakes_core::model::ContentHash; +use pinakes_core::sync::{ + ChunkInfo, DeviceId, DeviceType, SyncChangeType, SyncConflict, SyncDevice, SyncLogEntry, + UploadSession, UploadStatus, generate_device_token, hash_device_token, update_device_cursor, +}; +use std::path::Path as FilePath; + +const DEFAULT_CHUNK_SIZE: u64 = 4 * 1024 * 1024; // 4MB +const DEFAULT_CHANGES_LIMIT: u64 = 100; + +/// Register a new sync device +/// POST /api/sync/devices +pub async fn register_device( + State(state): State, + Extension(username): Extension, + Json(req): Json, +) -> ApiResult> { + let config = state.config.read().await; + if !config.sync.enabled { + return Err(ApiError::bad_request("Sync is not enabled")); + } + drop(config); + + let user_id = resolve_user_id(&state.storage, &username).await?; + + let device_type = req + .device_type + .parse::() + .map_err(|_| ApiError::bad_request("Invalid device type"))?; + + // Generate device token + let device_token = generate_device_token(); + let token_hash = hash_device_token(&device_token); + + let now = Utc::now(); + let device = SyncDevice { + id: DeviceId(Uuid::now_v7()), + user_id, + name: req.name, + device_type, + client_version: 
req.client_version, + os_info: req.os_info, + last_sync_at: None, + last_seen_at: now, + sync_cursor: Some(0), + enabled: true, + created_at: now, + updated_at: now, + }; + + let registered = state + .storage + .register_device(&device, &token_hash) + .await + .map_err(|e| ApiError::internal(format!("Failed to register device: {}", e)))?; + + Ok(Json(DeviceRegistrationResponse { + device: registered.into(), + device_token, + })) +} + +/// List user's sync devices +/// GET /api/sync/devices +pub async fn list_devices( + State(state): State, + Extension(username): Extension, +) -> ApiResult>> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let devices = state + .storage + .list_user_devices(user_id) + .await + .map_err(|e| ApiError::internal(format!("Failed to list devices: {}", e)))?; + + Ok(Json(devices.into_iter().map(Into::into).collect())) +} + +/// Get device details +/// GET /api/sync/devices/{id} +pub async fn get_device( + State(state): State, + Extension(username): Extension, + Path(id): Path, +) -> ApiResult> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let device = state + .storage + .get_device(DeviceId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?; + + // Verify ownership + if device.user_id != user_id { + return Err(ApiError::forbidden("Not authorized to access this device")); + } + + Ok(Json(device.into())) +} + +/// Update a device +/// PUT /api/sync/devices/{id} +pub async fn update_device( + State(state): State, + Extension(username): Extension, + Path(id): Path, + Json(req): Json, +) -> ApiResult> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let mut device = state + .storage + .get_device(DeviceId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?; + + // Verify ownership + if device.user_id != user_id { + return Err(ApiError::forbidden("Not authorized to update this device")); + } + + if let 
Some(name) = req.name { + device.name = name; + } + if let Some(enabled) = req.enabled { + device.enabled = enabled; + } + + state + .storage + .update_device(&device) + .await + .map_err(|e| ApiError::internal(format!("Failed to update device: {}", e)))?; + + Ok(Json(device.into())) +} + +/// Delete a device +/// DELETE /api/sync/devices/{id} +pub async fn delete_device( + State(state): State, + Extension(username): Extension, + Path(id): Path, +) -> ApiResult { + let user_id = resolve_user_id(&state.storage, &username).await?; + let device = state + .storage + .get_device(DeviceId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?; + + // Verify ownership + if device.user_id != user_id { + return Err(ApiError::forbidden("Not authorized to delete this device")); + } + + state + .storage + .delete_device(DeviceId(id)) + .await + .map_err(|e| ApiError::internal(format!("Failed to delete device: {}", e)))?; + + Ok(StatusCode::NO_CONTENT) +} + +/// Regenerate device token +/// POST /api/sync/devices/{id}/token +pub async fn regenerate_token( + State(state): State, + Extension(username): Extension, + Path(id): Path, +) -> ApiResult> { + let user_id = resolve_user_id(&state.storage, &username).await?; + let device = state + .storage + .get_device(DeviceId(id)) + .await + .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?; + + // Verify ownership + if device.user_id != user_id { + return Err(ApiError::forbidden( + "Not authorized to regenerate token for this device", + )); + } + + // Generate new token + let new_token = generate_device_token(); + let token_hash = hash_device_token(&new_token); + + // Re-register with new token (this updates the token hash) + let updated = state + .storage + .register_device(&device, &token_hash) + .await + .map_err(|e| ApiError::internal(format!("Failed to regenerate token: {}", e)))?; + + Ok(Json(DeviceRegistrationResponse { + device: updated.into(), + device_token: new_token, + 
})) +} + +/// Get changes since cursor +/// GET /api/sync/changes +pub async fn get_changes( + State(state): State, + Query(params): Query, +) -> ApiResult> { + let config = state.config.read().await; + if !config.sync.enabled { + return Err(ApiError::bad_request("Sync is not enabled")); + } + drop(config); + + let cursor = params.cursor.unwrap_or(0); + let limit = params.limit.unwrap_or(DEFAULT_CHANGES_LIMIT); + + let changes = state + .storage + .get_changes_since(cursor, limit + 1) + .await + .map_err(|e| ApiError::internal(format!("Failed to get changes: {}", e)))?; + + let has_more = changes.len() > limit as usize; + let changes: Vec = changes + .into_iter() + .take(limit as usize) + .map(Into::into) + .collect(); + + let new_cursor = changes.last().map(|c| c.sequence).unwrap_or(cursor); + + Ok(Json(ChangesResponse { + changes, + cursor: new_cursor, + has_more, + })) +} + +/// Report local changes from client +/// POST /api/sync/report +pub async fn report_changes( + State(state): State, + Extension(_username): Extension, + Json(req): Json, +) -> ApiResult> { + let config = state.config.read().await; + if !config.sync.enabled { + return Err(ApiError::bad_request("Sync is not enabled")); + } + let conflict_resolution = config.sync.default_conflict_resolution.clone(); + drop(config); + + let mut accepted = Vec::new(); + let mut conflicts = Vec::new(); + let mut upload_required = Vec::new(); + + for change in req.changes { + // Check for conflicts + if let Some(content_hash) = &change.content_hash { + let server_state = state + .storage + .get_media_by_path(FilePath::new(&change.path)) + .await + .ok() + .flatten(); + + if let Some(server_item) = server_state { + let client_hash = ContentHash(content_hash.clone()); + if server_item.content_hash != client_hash { + // Conflict detected + let conflict = SyncConflict { + id: Uuid::now_v7(), + device_id: DeviceId(Uuid::nil()), // Will be set by device context + path: change.path.clone(), + local_hash: 
content_hash.clone(), + local_mtime: change.local_mtime.unwrap_or(0), + server_hash: server_item.content_hash.to_string(), + server_mtime: server_item.updated_at.timestamp(), + detected_at: Utc::now(), + resolved_at: None, + resolution: None, + }; + + // Auto-resolve if configured + match conflict_resolution { + ConflictResolution::ServerWins => { + // Client should download server version + accepted.push(change.path); + } + ConflictResolution::ClientWins => { + // Client should upload + upload_required.push(change.path); + } + ConflictResolution::KeepBoth | ConflictResolution::Manual => { + conflicts.push(conflict.into()); + } + } + continue; + } + } + } + + // No conflict, check if upload is needed + match change.change_type.as_str() { + "created" | "modified" => { + if change.content_hash.is_some() { + upload_required.push(change.path); + } else { + accepted.push(change.path); + } + } + "deleted" => { + // Record deletion + let entry = SyncLogEntry { + id: Uuid::now_v7(), + sequence: 0, // Will be assigned by storage + change_type: SyncChangeType::Deleted, + media_id: None, + path: change.path.clone(), + content_hash: None, + file_size: None, + metadata_json: None, + changed_by_device: None, + timestamp: Utc::now(), + }; + + if state.storage.record_sync_change(&entry).await.is_ok() { + accepted.push(change.path); + } + } + _ => { + accepted.push(change.path); + } + } + } + + Ok(Json(ReportChangesResponse { + accepted, + conflicts, + upload_required, + })) +} + +/// Acknowledge processed changes +/// POST /api/sync/ack +pub async fn acknowledge_changes( + State(state): State, + Extension(_username): Extension, + headers: HeaderMap, + Json(req): Json, +) -> ApiResult { + // Get device from header or context + let device_token = headers + .get("X-Device-Token") + .and_then(|v| v.to_str().ok()) + .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?; + + let token_hash = hash_device_token(device_token); + let device = state + .storage + 
.get_device_by_token(&token_hash) + .await + .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))? + .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?; + + // Update device cursor + update_device_cursor(&state.storage, device.id, req.cursor) + .await + .map_err(|e| ApiError::internal(format!("Failed to update cursor: {}", e)))?; + + Ok(StatusCode::OK) +} + +/// List unresolved conflicts +/// GET /api/sync/conflicts +pub async fn list_conflicts( + State(state): State, + Extension(_username): Extension, + headers: HeaderMap, +) -> ApiResult>> { + let device_token = headers + .get("X-Device-Token") + .and_then(|v| v.to_str().ok()) + .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?; + + let token_hash = hash_device_token(device_token); + let device = state + .storage + .get_device_by_token(&token_hash) + .await + .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))? + .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?; + + let conflicts = state + .storage + .get_unresolved_conflicts(device.id) + .await + .map_err(|e| ApiError::internal(format!("Failed to get conflicts: {}", e)))?; + + Ok(Json(conflicts.into_iter().map(Into::into).collect())) +} + +/// Resolve a sync conflict +/// POST /api/sync/conflicts/{id}/resolve +pub async fn resolve_conflict( + State(state): State, + Extension(_username): Extension, + Path(id): Path, + Json(req): Json, +) -> ApiResult { + let resolution = match req.resolution.as_str() { + "server_wins" => ConflictResolution::ServerWins, + "client_wins" => ConflictResolution::ClientWins, + "keep_both" => ConflictResolution::KeepBoth, + _ => return Err(ApiError::bad_request("Invalid resolution type")), + }; + + state + .storage + .resolve_conflict(id, resolution) + .await + .map_err(|e| ApiError::internal(format!("Failed to resolve conflict: {}", e)))?; + + Ok(StatusCode::OK) +} + +/// Create an upload session for chunked upload +/// POST /api/sync/upload +pub 
async fn create_upload( + State(state): State, + Extension(_username): Extension, + headers: HeaderMap, + Json(req): Json, +) -> ApiResult> { + let config = state.config.read().await; + if !config.sync.enabled { + return Err(ApiError::bad_request("Sync is not enabled")); + } + let upload_timeout_hours = config.sync.upload_timeout_hours; + drop(config); + + let device_token = headers + .get("X-Device-Token") + .and_then(|v| v.to_str().ok()) + .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?; + + let token_hash = hash_device_token(device_token); + let device = state + .storage + .get_device_by_token(&token_hash) + .await + .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))? + .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?; + + let chunk_size = req.chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE); + let chunk_count = (req.expected_size + chunk_size - 1) / chunk_size; + let now = Utc::now(); + + let session = UploadSession { + id: Uuid::now_v7(), + device_id: device.id, + target_path: req.target_path, + expected_hash: ContentHash(req.expected_hash), + expected_size: req.expected_size, + chunk_size, + chunk_count, + status: UploadStatus::Pending, + created_at: now, + expires_at: now + chrono::Duration::hours(upload_timeout_hours as i64), + last_activity: now, + }; + + state + .storage + .create_upload_session(&session) + .await + .map_err(|e| ApiError::internal(format!("Failed to create upload session: {}", e)))?; + + Ok(Json(session.into())) +} + +/// Upload a chunk +/// PUT /api/sync/upload/{id}/chunks/{index} +pub async fn upload_chunk( + State(state): State, + Path((session_id, chunk_index)): Path<(Uuid, u64)>, + _headers: HeaderMap, + body: axum::body::Bytes, +) -> ApiResult> { + let session = state + .storage + .get_upload_session(session_id) + .await + .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?; + + if session.status == UploadStatus::Expired { + return 
Err(ApiError::bad_request("Upload session has expired")); + } + + if chunk_index >= session.chunk_count { + return Err(ApiError::bad_request("Invalid chunk index")); + } + + // Reject oversized bodies: offsets are derived from the negotiated chunk size, so a larger payload would corrupt reassembly. + if body.len() as u64 > session.chunk_size { + return Err(ApiError::bad_request("Chunk exceeds negotiated chunk size")); + } + + // Calculate chunk hash + let hash = blake3::hash(&body); + let chunk_hash = hash.to_hex().to_string(); + + let chunk = ChunkInfo { + upload_id: session_id, + chunk_index, + offset: chunk_index * session.chunk_size, + size: body.len() as u64, + hash: chunk_hash, + received_at: Utc::now(), + }; + + state + .storage + .record_chunk(session_id, &chunk) + .await + .map_err(|e| ApiError::internal(format!("Failed to record chunk: {}", e)))?; + + // Store the chunk data (would integrate with managed storage) + // For now, this is a placeholder - actual implementation would write to temp storage + + Ok(Json(ChunkUploadedResponse { + chunk_index, + received: true, + })) +} + +/// Get upload session status +/// GET /api/sync/upload/{id} +pub async fn get_upload_status( + State(state): State, + Path(id): Path, +) -> ApiResult> { + let session = state + .storage + .get_upload_session(id) + .await + .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?; + + Ok(Json(session.into())) +} + +/// Complete an upload session +/// POST /api/sync/upload/{id}/complete +pub async fn complete_upload( + State(state): State, + Path(id): Path, +) -> ApiResult { + let mut session = state + .storage + .get_upload_session(id) + .await + .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?; + + // A cancelled or expired session must not be marked Completed below and must not produce a sync-log entry. + if matches!(session.status, UploadStatus::Cancelled | UploadStatus::Expired) { + return Err(ApiError::bad_request("Upload session is no longer active")); + } + + // Verify all chunks received + let chunks = state + .storage + .get_upload_chunks(id) + .await + .map_err(|e| ApiError::internal(format!("Failed to get chunks: {}", e)))?; + + if chunks.len() != session.chunk_count as usize { + return Err(ApiError::bad_request(format!( + "Missing chunks: expected {}, got {}", + session.chunk_count, + chunks.len() + ))); + } + + // Mark session as completed + session.status = UploadStatus::Completed; + state + .storage +
.update_upload_session(&session) + .await + .map_err(|e| ApiError::internal(format!("Failed to update session: {}", e)))?; + + // Record the sync change + let entry = SyncLogEntry { + id: Uuid::now_v7(), + sequence: 0, + change_type: SyncChangeType::Created, + media_id: None, + path: session.target_path, + content_hash: Some(session.expected_hash), + file_size: Some(session.expected_size), + metadata_json: None, + changed_by_device: Some(session.device_id), + timestamp: Utc::now(), + }; + + state + .storage + .record_sync_change(&entry) + .await + .map_err(|e| ApiError::internal(format!("Failed to record change: {}", e)))?; + + Ok(StatusCode::OK) +} + +/// Cancel an upload session +/// DELETE /api/sync/upload/{id} +pub async fn cancel_upload( + State(state): State, + Path(id): Path, +) -> ApiResult { + let mut session = state + .storage + .get_upload_session(id) + .await + .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?; + + session.status = UploadStatus::Cancelled; + state + .storage + .update_upload_session(&session) + .await + .map_err(|e| ApiError::internal(format!("Failed to cancel session: {}", e)))?; + + Ok(StatusCode::NO_CONTENT) +} + +/// Download a file for sync (supports Range header) +/// GET /api/sync/download/{*path} +pub async fn download_file( + State(state): State, + Path(path): Path, + headers: HeaderMap, +) -> ApiResult { + let item = state + .storage + .get_media_by_path(FilePath::new(&path)) + .await + .map_err(|e| ApiError::internal(format!("Failed to get media: {}", e)))? 
+ .ok_or_else(|| ApiError::not_found("File not found"))?; + + let file = tokio::fs::File::open(&item.path) + .await + .map_err(|e| ApiError::not_found(format!("File not found: {}", e)))?; + + let metadata = file + .metadata() + .await + .map_err(|e| ApiError::internal(format!("Failed to get metadata: {}", e)))?; + + let file_size = metadata.len(); + + // Check for Range header + if let Some(range_header) = headers.get(header::RANGE) { + if let Ok(range_str) = range_header.to_str() { + if let Some(range) = parse_range_header(range_str, file_size) { + // Partial content response + let (start, end) = range; + let length = end - start + 1; + + let file = tokio::fs::File::open(&item.path) + .await + .map_err(|e| ApiError::internal(format!("Failed to reopen file: {}", e)))?; + + let stream = ReaderStream::new(file); + let body = Body::from_stream(stream); + + return Ok(( + StatusCode::PARTIAL_CONTENT, + [ + (header::CONTENT_TYPE, item.media_type.mime_type()), + (header::CONTENT_LENGTH, length.to_string()), + ( + header::CONTENT_RANGE, + format!("bytes {}-{}/{}", start, end, file_size), + ), + (header::ACCEPT_RANGES, "bytes".to_string()), + ], + body, + ) + .into_response()); + } + } + } + + // Full content response + let stream = ReaderStream::new(file); + let body = Body::from_stream(stream); + + Ok(( + StatusCode::OK, + [ + (header::CONTENT_TYPE, item.media_type.mime_type()), + (header::CONTENT_LENGTH, file_size.to_string()), + (header::ACCEPT_RANGES, "bytes".to_string()), + ], + body, + ) + .into_response()) +} + +/// Parse HTTP Range header +fn parse_range_header(range: &str, file_size: u64) -> Option<(u64, u64)> { + let range = range.strip_prefix("bytes=")?; + let parts: Vec<&str> = range.split('-').collect(); + if parts.len() != 2 { + return None; + } + + let start: u64 = parts[0].parse().ok()?; + let end: u64 = if parts[1].is_empty() { + file_size - 1 + } else { + parts[1].parse().ok()? 
+ }; + + if start > end || end >= file_size { + return None; + } + + Some((start, end)) +} diff --git a/crates/pinakes-server/src/routes/upload.rs b/crates/pinakes-server/src/routes/upload.rs new file mode 100644 index 0000000..56d2279 --- /dev/null +++ b/crates/pinakes-server/src/routes/upload.rs @@ -0,0 +1,169 @@ +use axum::{ + Json, + extract::{Multipart, Path, State}, + http::{StatusCode, header}, + response::IntoResponse, +}; +use tokio_util::io::ReaderStream; +use uuid::Uuid; + +use crate::dto::{ManagedStorageStatsResponse, UploadResponse}; +use crate::error::{ApiError, ApiResult}; +use crate::state::AppState; +use pinakes_core::model::MediaId; +use pinakes_core::upload; + +/// Upload a file to managed storage +/// POST /api/upload +pub async fn upload_file( + State(state): State, + mut multipart: Multipart, +) -> ApiResult> { + let managed_storage = state + .managed_storage + .as_ref() + .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?; + + let config = state.config.read().await; + if !config.managed_storage.enabled { + return Err(ApiError::bad_request("Managed storage is not enabled")); + } + drop(config); + + // Extract file from multipart + let field = multipart + .next_field() + .await + .map_err(|e| ApiError::bad_request(format!("Failed to read multipart field: {}", e)))? 
+ .ok_or_else(|| ApiError::bad_request("No file provided"))?; + + let original_filename = field + .file_name() + .map(|s| s.to_string()) + .unwrap_or_else(|| "unknown".to_string()); + + let content_type = field + .content_type() + .map(|s| s.to_string()) + .unwrap_or_else(|| "application/octet-stream".to_string()); + + let data = field + .bytes() + .await + .map_err(|e| ApiError::bad_request(format!("Failed to read file data: {}", e)))?; + + // Process the upload + let result = upload::process_upload_bytes( + &state.storage, + managed_storage.as_ref(), + &data, + &original_filename, + Some(&content_type), + ) + .await + .map_err(|e| ApiError::internal(format!("Upload failed: {}", e)))?; + + Ok(Json(result.into())) +} + +/// Download a managed file +/// GET /api/media/{id}/download +pub async fn download_file( + State(state): State, + Path(id): Path, +) -> ApiResult { + let media_id = MediaId(id); + let item = state + .storage + .get_media(media_id) + .await + .map_err(|e| ApiError::not_found(format!("Media not found: {}", e)))?; + + let managed_storage = state + .managed_storage + .as_ref() + .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?; + + // Check if this is a managed file + if item.storage_mode != pinakes_core::model::StorageMode::Managed { + // For external files, stream from their original path + let file = tokio::fs::File::open(&item.path) + .await + .map_err(|e| ApiError::not_found(format!("File not found: {}", e)))?; + + let stream = ReaderStream::new(file); + let body = axum::body::Body::from_stream(stream); + + let content_type = item.media_type.mime_type(); + + let filename = item.original_filename.unwrap_or(item.file_name); + + return Ok(( + [ + (header::CONTENT_TYPE, content_type), + ( + header::CONTENT_DISPOSITION, + format!("attachment; filename=\"{}\"", filename), + ), + ], + body, + )); + } + + // For managed files, stream from content-addressable storage + let file = managed_storage + .open(&item.content_hash) + .await 
+ .map_err(|e| ApiError::not_found(format!("Blob not found: {}", e)))?; + + let stream = ReaderStream::new(file); + let body = axum::body::Body::from_stream(stream); + + let content_type = item.media_type.mime_type(); + + let filename = item.original_filename.unwrap_or(item.file_name); + + Ok(( + [ + (header::CONTENT_TYPE, content_type), + ( + header::CONTENT_DISPOSITION, + format!("attachment; filename=\"{}\"", filename), + ), + ], + body, + )) +} + +/// Migrate an external file to managed storage +/// POST /api/media/{id}/move-to-managed +pub async fn move_to_managed( + State(state): State, + Path(id): Path, +) -> ApiResult { + let managed_storage = state + .managed_storage + .as_ref() + .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?; + + let media_id = MediaId(id); + upload::migrate_to_managed(&state.storage, managed_storage.as_ref(), media_id) + .await + .map_err(|e| ApiError::internal(format!("Migration failed: {}", e)))?; + + Ok(StatusCode::NO_CONTENT) +} + +/// Get managed storage statistics +/// GET /api/managed/stats +pub async fn managed_stats( + State(state): State, +) -> ApiResult> { + let stats = state + .storage + .managed_storage_stats() + .await + .map_err(|e| ApiError::internal(format!("Failed to get stats: {}", e)))?; + + Ok(Json(stats.into())) +} diff --git a/crates/pinakes-server/src/state.rs b/crates/pinakes-server/src/state.rs index 0cdfa9e..6d61f04 100644 --- a/crates/pinakes-server/src/state.rs +++ b/crates/pinakes-server/src/state.rs @@ -6,6 +6,7 @@ use tokio::sync::RwLock; use pinakes_core::cache::CacheLayer; use pinakes_core::config::Config; use pinakes_core::jobs::JobQueue; +use pinakes_core::managed_storage::ManagedStorageService; use pinakes_core::plugin::PluginManager; use pinakes_core::scan::ScanProgress; use pinakes_core::scheduler::TaskScheduler; @@ -26,4 +27,5 @@ pub struct AppState { pub scheduler: Arc, pub plugin_manager: Option>, pub transcode_service: Option>, + pub managed_storage: Option>, } diff 
--git a/crates/pinakes-server/tests/api_test.rs b/crates/pinakes-server/tests/api_test.rs index 2b66343..b3e37b5 100644 --- a/crates/pinakes-server/tests/api_test.rs +++ b/crates/pinakes-server/tests/api_test.rs @@ -11,9 +11,9 @@ use tower::ServiceExt; use pinakes_core::cache::CacheLayer; use pinakes_core::config::{ AccountsConfig, AnalyticsConfig, CloudConfig, Config, DirectoryConfig, EnrichmentConfig, - JobsConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, SqliteConfig, - StorageBackendType, StorageConfig, ThumbnailConfig, TlsConfig, TranscodingConfig, UiConfig, - UserAccount, UserRole, WebhookConfig, + JobsConfig, ManagedStorageConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, + SharingConfig, SqliteConfig, StorageBackendType, StorageConfig, SyncConfig, ThumbnailConfig, + TlsConfig, TranscodingConfig, UiConfig, UserAccount, UserRole, WebhookConfig, }; use pinakes_core::jobs::JobQueue; use pinakes_core::storage::StorageBackend; @@ -127,6 +127,9 @@ fn default_config() -> Config { cloud: CloudConfig::default(), analytics: AnalyticsConfig::default(), photos: PhotoConfig::default(), + managed_storage: ManagedStorageConfig::default(), + sync: SyncConfig::default(), + sharing: SharingConfig::default(), } } @@ -156,6 +159,7 @@ async fn setup_app() -> axum::Router { scheduler: Arc::new(scheduler), plugin_manager: None, transcode_service: None, + managed_storage: None, }; pinakes_server::app::create_router(state) @@ -227,6 +231,7 @@ async fn setup_app_with_auth() -> (axum::Router, String, String, String) { scheduler: Arc::new(scheduler), plugin_manager: None, transcode_service: None, + managed_storage: None, }; let app = pinakes_server::app::create_router(state); diff --git a/crates/pinakes-server/tests/plugin_test.rs b/crates/pinakes-server/tests/plugin_test.rs index 7970f6d..83fd81c 100644 --- a/crates/pinakes-server/tests/plugin_test.rs +++ b/crates/pinakes-server/tests/plugin_test.rs @@ -11,9 +11,9 @@ use tower::ServiceExt; use 
pinakes_core::cache::CacheLayer; use pinakes_core::config::{ AccountsConfig, AnalyticsConfig, CloudConfig, Config, DirectoryConfig, EnrichmentConfig, - JobsConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, SqliteConfig, - StorageBackendType, StorageConfig, ThumbnailConfig, TlsConfig, TranscodingConfig, UiConfig, - WebhookConfig, + JobsConfig, ManagedStorageConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, + SharingConfig, SqliteConfig, StorageBackendType, StorageConfig, SyncConfig, ThumbnailConfig, + TlsConfig, TranscodingConfig, UiConfig, WebhookConfig, }; use pinakes_core::jobs::JobQueue; use pinakes_core::plugin::PluginManager; @@ -93,6 +93,9 @@ async fn setup_app_with_plugins() -> (axum::Router, Arc, tempfile cloud: CloudConfig::default(), analytics: AnalyticsConfig::default(), photos: PhotoConfig::default(), + managed_storage: ManagedStorageConfig::default(), + sync: SyncConfig::default(), + sharing: SharingConfig::default(), }; let job_queue = JobQueue::new(1, |_id, _kind, _cancel, _jobs| tokio::spawn(async {})); @@ -114,6 +117,7 @@ async fn setup_app_with_plugins() -> (axum::Router, Arc, tempfile scheduler: Arc::new(scheduler), plugin_manager: Some(plugin_manager.clone()), transcode_service: None, + managed_storage: None, }; let router = pinakes_server::app::create_router(state); diff --git a/crates/pinakes-ui/src/app.rs b/crates/pinakes-ui/src/app.rs index a0986e3..033d24a 100644 --- a/crates/pinakes-ui/src/app.rs +++ b/crates/pinakes-ui/src/app.rs @@ -1412,7 +1412,6 @@ pub fn App() -> Element { // Check if already importing - if so, add to queue - // Get preview files if available for per-file progress // Use parallel import with per-batch progress @@ -1439,26 +1438,30 @@ pub fn App() -> Element { // Update progress after batch // Extended import state + + + + + + + + + + + + + + + + + + if *import_in_progress.read() { - - - - - - - - - - - - - - import_queue.write().push(file_name); show_toast("Added to import 
queue".into(), false); return; diff --git a/migrations/postgres/V15__managed_storage.sql b/migrations/postgres/V15__managed_storage.sql new file mode 100644 index 0000000..56ef8f4 --- /dev/null +++ b/migrations/postgres/V15__managed_storage.sql @@ -0,0 +1,30 @@ +-- V15: Managed File Storage +-- Adds server-side content-addressable storage for uploaded files + +-- Add storage mode to media_items (external = file on disk, managed = in content-addressable storage) +ALTER TABLE media_items ADD COLUMN storage_mode TEXT NOT NULL DEFAULT 'external'; + +-- Original filename for managed uploads (preserved separately from file_name which may be normalized) +ALTER TABLE media_items ADD COLUMN original_filename TEXT; + +-- When the file was uploaded to managed storage +ALTER TABLE media_items ADD COLUMN uploaded_at TIMESTAMPTZ; + +-- Storage key for looking up the blob (usually same as content_hash for deduplication) +ALTER TABLE media_items ADD COLUMN storage_key TEXT; + +-- Managed blobs table - tracks deduplicated file storage +CREATE TABLE managed_blobs ( + content_hash TEXT PRIMARY KEY NOT NULL, + file_size BIGINT NOT NULL, + mime_type TEXT NOT NULL, + reference_count INTEGER NOT NULL DEFAULT 1, + stored_at TIMESTAMPTZ NOT NULL, + last_verified TIMESTAMPTZ +); + +-- Index for finding managed media items +CREATE INDEX idx_media_storage_mode ON media_items(storage_mode); + +-- Index for finding orphaned blobs (reference_count = 0) +CREATE INDEX idx_blobs_reference_count ON managed_blobs(reference_count); diff --git a/migrations/postgres/V16__sync_system.sql b/migrations/postgres/V16__sync_system.sql new file mode 100644 index 0000000..5a54b7a --- /dev/null +++ b/migrations/postgres/V16__sync_system.sql @@ -0,0 +1,103 @@ +-- V16: Cross-Device Sync System +-- Adds device registration, change tracking, and chunked upload support + +-- Sync devices table +CREATE TABLE sync_devices ( + id TEXT PRIMARY KEY NOT NULL, + user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE, 
+ name TEXT NOT NULL, + device_type TEXT NOT NULL, + client_version TEXT NOT NULL, + os_info TEXT, + device_token_hash TEXT NOT NULL UNIQUE, + last_sync_at TIMESTAMPTZ, + last_seen_at TIMESTAMPTZ NOT NULL, + sync_cursor BIGINT DEFAULT 0, + enabled BOOLEAN NOT NULL DEFAULT TRUE, + created_at TIMESTAMPTZ NOT NULL, + updated_at TIMESTAMPTZ NOT NULL +); + +CREATE INDEX idx_sync_devices_user ON sync_devices(user_id); +CREATE INDEX idx_sync_devices_token ON sync_devices(device_token_hash); + +-- Sync log table - tracks all changes for sync +CREATE TABLE sync_log ( + id TEXT PRIMARY KEY NOT NULL, + sequence BIGSERIAL UNIQUE NOT NULL, + change_type TEXT NOT NULL, + media_id TEXT REFERENCES media_items(id) ON DELETE SET NULL, + path TEXT NOT NULL, + content_hash TEXT, + file_size BIGINT, + metadata_json TEXT, + changed_by_device TEXT REFERENCES sync_devices(id) ON DELETE SET NULL, + timestamp TIMESTAMPTZ NOT NULL +); + +CREATE INDEX idx_sync_log_sequence ON sync_log(sequence); +CREATE INDEX idx_sync_log_path ON sync_log(path); +CREATE INDEX idx_sync_log_timestamp ON sync_log(timestamp); + +-- Device sync state - tracks sync status per device per file +CREATE TABLE device_sync_state ( + device_id TEXT NOT NULL REFERENCES sync_devices(id) ON DELETE CASCADE, + path TEXT NOT NULL, + local_hash TEXT, + server_hash TEXT, + local_mtime BIGINT, + server_mtime BIGINT, + sync_status TEXT NOT NULL, + last_synced_at TIMESTAMPTZ, + conflict_info_json TEXT, + PRIMARY KEY (device_id, path) +); + +CREATE INDEX idx_device_sync_status ON device_sync_state(device_id, sync_status); + +-- Upload sessions for chunked uploads +CREATE TABLE upload_sessions ( + id TEXT PRIMARY KEY NOT NULL, + device_id TEXT NOT NULL REFERENCES sync_devices(id) ON DELETE CASCADE, + target_path TEXT NOT NULL, + expected_hash TEXT NOT NULL, + expected_size BIGINT NOT NULL, + chunk_size BIGINT NOT NULL, + chunk_count BIGINT NOT NULL, + status TEXT NOT NULL, + created_at TIMESTAMPTZ NOT NULL, + expires_at TIMESTAMPTZ 
NOT NULL, + last_activity TIMESTAMPTZ NOT NULL +); + +CREATE INDEX idx_upload_sessions_device ON upload_sessions(device_id); +CREATE INDEX idx_upload_sessions_status ON upload_sessions(status); +CREATE INDEX idx_upload_sessions_expires ON upload_sessions(expires_at); + +-- Upload chunks - tracks received chunks +CREATE TABLE upload_chunks ( + upload_id TEXT NOT NULL REFERENCES upload_sessions(id) ON DELETE CASCADE, + chunk_index BIGINT NOT NULL, + -- OFFSET is a reserved word in PostgreSQL; the column name must be quoted or the migration fails to apply + "offset" BIGINT NOT NULL, + size BIGINT NOT NULL, + hash TEXT NOT NULL, + received_at TIMESTAMPTZ NOT NULL, + PRIMARY KEY (upload_id, chunk_index) +); + +-- Sync conflicts +CREATE TABLE sync_conflicts ( + id TEXT PRIMARY KEY NOT NULL, + device_id TEXT NOT NULL REFERENCES sync_devices(id) ON DELETE CASCADE, + path TEXT NOT NULL, + local_hash TEXT NOT NULL, + local_mtime BIGINT NOT NULL, + server_hash TEXT NOT NULL, + server_mtime BIGINT NOT NULL, + detected_at TIMESTAMPTZ NOT NULL, + resolved_at TIMESTAMPTZ, + resolution TEXT +); + +CREATE INDEX idx_sync_conflicts_device ON sync_conflicts(device_id); +CREATE INDEX idx_sync_conflicts_unresolved ON sync_conflicts(device_id) WHERE resolved_at IS NULL; diff --git a/migrations/postgres/V17__enhanced_sharing.sql b/migrations/postgres/V17__enhanced_sharing.sql new file mode 100644 index 0000000..2107b5c --- /dev/null +++ b/migrations/postgres/V17__enhanced_sharing.sql @@ -0,0 +1,83 @@ +-- V17: Enhanced Sharing System +-- Replaces simple share_links with comprehensive sharing capabilities + +-- Enhanced shares table +CREATE TABLE shares ( + id TEXT PRIMARY KEY NOT NULL, + target_type TEXT NOT NULL CHECK (target_type IN ('media', 'collection', 'tag', 'saved_search')), + target_id TEXT NOT NULL, + owner_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + recipient_type TEXT NOT NULL CHECK (recipient_type IN ('public_link', 'user', 'group', 'federated')), + recipient_user_id TEXT REFERENCES users(id) ON DELETE CASCADE, + recipient_group_id TEXT, + recipient_federated_handle
TEXT, + recipient_federated_server TEXT, + public_token TEXT UNIQUE, + public_password_hash TEXT, + perm_view BOOLEAN NOT NULL DEFAULT TRUE, + perm_download BOOLEAN NOT NULL DEFAULT FALSE, + perm_edit BOOLEAN NOT NULL DEFAULT FALSE, + perm_delete BOOLEAN NOT NULL DEFAULT FALSE, + perm_reshare BOOLEAN NOT NULL DEFAULT FALSE, + perm_add BOOLEAN NOT NULL DEFAULT FALSE, + note TEXT, + expires_at TIMESTAMPTZ, + access_count BIGINT NOT NULL DEFAULT 0, + last_accessed TIMESTAMPTZ, + inherit_to_children BOOLEAN NOT NULL DEFAULT TRUE, + parent_share_id TEXT REFERENCES shares(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL, + updated_at TIMESTAMPTZ NOT NULL, + UNIQUE(owner_id, target_type, target_id, recipient_type, recipient_user_id) +); + +CREATE INDEX idx_shares_owner ON shares(owner_id); +CREATE INDEX idx_shares_recipient_user ON shares(recipient_user_id); +CREATE INDEX idx_shares_target ON shares(target_type, target_id); +CREATE INDEX idx_shares_token ON shares(public_token); +CREATE INDEX idx_shares_expires ON shares(expires_at); + +-- Share activity log +CREATE TABLE share_activity ( + id TEXT PRIMARY KEY NOT NULL, + share_id TEXT NOT NULL REFERENCES shares(id) ON DELETE CASCADE, + actor_id TEXT REFERENCES users(id) ON DELETE SET NULL, + actor_ip TEXT, + action TEXT NOT NULL, + details TEXT, + timestamp TIMESTAMPTZ NOT NULL +); + +CREATE INDEX idx_share_activity_share ON share_activity(share_id); +CREATE INDEX idx_share_activity_timestamp ON share_activity(timestamp); + +-- Share notifications +CREATE TABLE share_notifications ( + id TEXT PRIMARY KEY NOT NULL, + user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + share_id TEXT NOT NULL REFERENCES shares(id) ON DELETE CASCADE, + notification_type TEXT NOT NULL, + is_read BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMPTZ NOT NULL +); + +CREATE INDEX idx_share_notifications_user ON share_notifications(user_id); +CREATE INDEX idx_share_notifications_unread ON share_notifications(user_id) WHERE 
is_read = FALSE; + +-- Migrate existing share_links to new shares table +DO $$ +BEGIN + IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'share_links') THEN + INSERT INTO shares ( + id, target_type, target_id, owner_id, recipient_type, + public_token, public_password_hash, perm_view, perm_download, + access_count, expires_at, created_at, updated_at + ) + SELECT + id, 'media', media_id, created_by, 'public_link', + token, password_hash, TRUE, TRUE, + view_count, expires_at, created_at, created_at + FROM share_links + ON CONFLICT DO NOTHING; + END IF; +END $$; diff --git a/migrations/sqlite/V15__managed_storage.sql b/migrations/sqlite/V15__managed_storage.sql new file mode 100644 index 0000000..b7f2a9d --- /dev/null +++ b/migrations/sqlite/V15__managed_storage.sql @@ -0,0 +1,30 @@ +-- V15: Managed File Storage +-- Adds server-side content-addressable storage for uploaded files + +-- Add storage mode to media_items (external = file on disk, managed = in content-addressable storage) +ALTER TABLE media_items ADD COLUMN storage_mode TEXT NOT NULL DEFAULT 'external'; + +-- Original filename for managed uploads (preserved separately from file_name which may be normalized) +ALTER TABLE media_items ADD COLUMN original_filename TEXT; + +-- When the file was uploaded to managed storage +ALTER TABLE media_items ADD COLUMN uploaded_at TEXT; + +-- Storage key for looking up the blob (usually same as content_hash for deduplication) +ALTER TABLE media_items ADD COLUMN storage_key TEXT; + +-- Managed blobs table - tracks deduplicated file storage +CREATE TABLE managed_blobs ( + content_hash TEXT PRIMARY KEY NOT NULL, + file_size INTEGER NOT NULL, + mime_type TEXT NOT NULL, + reference_count INTEGER NOT NULL DEFAULT 1, + stored_at TEXT NOT NULL, + last_verified TEXT +); + +-- Index for finding managed media items +CREATE INDEX idx_media_storage_mode ON media_items(storage_mode); + +-- Index for finding orphaned blobs (reference_count = 0) +CREATE INDEX 
idx_blobs_reference_count ON managed_blobs(reference_count); diff --git a/migrations/sqlite/V16__sync_system.sql b/migrations/sqlite/V16__sync_system.sql new file mode 100644 index 0000000..8941500 --- /dev/null +++ b/migrations/sqlite/V16__sync_system.sql @@ -0,0 +1,117 @@ +-- V16: Cross-Device Sync System +-- Adds device registration, change tracking, and chunked upload support + +-- Sync devices table +CREATE TABLE sync_devices ( + id TEXT PRIMARY KEY NOT NULL, + user_id TEXT NOT NULL, + name TEXT NOT NULL, + device_type TEXT NOT NULL, + client_version TEXT NOT NULL, + os_info TEXT, + device_token_hash TEXT NOT NULL UNIQUE, + last_sync_at TEXT, + last_seen_at TEXT NOT NULL, + sync_cursor INTEGER DEFAULT 0, + enabled INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE +); + +CREATE INDEX idx_sync_devices_user ON sync_devices(user_id); +CREATE INDEX idx_sync_devices_token ON sync_devices(device_token_hash); + +-- Sync log table - tracks all changes for sync +CREATE TABLE sync_log ( + id TEXT PRIMARY KEY NOT NULL, + sequence INTEGER NOT NULL UNIQUE, + change_type TEXT NOT NULL, + media_id TEXT, + path TEXT NOT NULL, + content_hash TEXT, + file_size INTEGER, + metadata_json TEXT, + changed_by_device TEXT, + timestamp TEXT NOT NULL, + FOREIGN KEY (media_id) REFERENCES media_items(id) ON DELETE SET NULL, + FOREIGN KEY (changed_by_device) REFERENCES sync_devices(id) ON DELETE SET NULL +); + +CREATE INDEX idx_sync_log_sequence ON sync_log(sequence); +CREATE INDEX idx_sync_log_path ON sync_log(path); +CREATE INDEX idx_sync_log_timestamp ON sync_log(timestamp); + +-- Sequence counter for sync log +CREATE TABLE sync_sequence ( + id INTEGER PRIMARY KEY CHECK (id = 1), + current_value INTEGER NOT NULL DEFAULT 0 +); +INSERT INTO sync_sequence (id, current_value) VALUES (1, 0); + +-- Device sync state - tracks sync status per device per file +CREATE TABLE device_sync_state ( + 
device_id TEXT NOT NULL, + path TEXT NOT NULL, + local_hash TEXT, + server_hash TEXT, + local_mtime INTEGER, + server_mtime INTEGER, + sync_status TEXT NOT NULL, + last_synced_at TEXT, + conflict_info_json TEXT, + PRIMARY KEY (device_id, path), + FOREIGN KEY (device_id) REFERENCES sync_devices(id) ON DELETE CASCADE +); + +CREATE INDEX idx_device_sync_status ON device_sync_state(device_id, sync_status); + +-- Upload sessions for chunked uploads +CREATE TABLE upload_sessions ( + id TEXT PRIMARY KEY NOT NULL, + device_id TEXT NOT NULL, + target_path TEXT NOT NULL, + expected_hash TEXT NOT NULL, + expected_size INTEGER NOT NULL, + chunk_size INTEGER NOT NULL, + chunk_count INTEGER NOT NULL, + status TEXT NOT NULL, + created_at TEXT NOT NULL, + expires_at TEXT NOT NULL, + last_activity TEXT NOT NULL, + FOREIGN KEY (device_id) REFERENCES sync_devices(id) ON DELETE CASCADE +); + +CREATE INDEX idx_upload_sessions_device ON upload_sessions(device_id); +CREATE INDEX idx_upload_sessions_status ON upload_sessions(status); +CREATE INDEX idx_upload_sessions_expires ON upload_sessions(expires_at); + +-- Upload chunks - tracks received chunks +CREATE TABLE upload_chunks ( + upload_id TEXT NOT NULL, + chunk_index INTEGER NOT NULL, + -- quoted to match the PostgreSQL migration: OFFSET is an SQL keyword + "offset" INTEGER NOT NULL, + size INTEGER NOT NULL, + hash TEXT NOT NULL, + received_at TEXT NOT NULL, + PRIMARY KEY (upload_id, chunk_index), + FOREIGN KEY (upload_id) REFERENCES upload_sessions(id) ON DELETE CASCADE +); + +-- Sync conflicts +CREATE TABLE sync_conflicts ( + id TEXT PRIMARY KEY NOT NULL, + device_id TEXT NOT NULL, + path TEXT NOT NULL, + local_hash TEXT NOT NULL, + local_mtime INTEGER NOT NULL, + server_hash TEXT NOT NULL, + server_mtime INTEGER NOT NULL, + detected_at TEXT NOT NULL, + resolved_at TEXT, + resolution TEXT, + FOREIGN KEY (device_id) REFERENCES sync_devices(id) ON DELETE CASCADE +); + +CREATE INDEX idx_sync_conflicts_device ON sync_conflicts(device_id); +CREATE INDEX idx_sync_conflicts_unresolved ON sync_conflicts(device_id,
resolved_at) WHERE resolved_at IS NULL; diff --git a/migrations/sqlite/V17__enhanced_sharing.sql b/migrations/sqlite/V17__enhanced_sharing.sql new file mode 100644 index 0000000..1cd17f3 --- /dev/null +++ b/migrations/sqlite/V17__enhanced_sharing.sql @@ -0,0 +1,85 @@ +-- V17: Enhanced Sharing System +-- Replaces simple share_links with comprehensive sharing capabilities + +-- Enhanced shares table +CREATE TABLE shares ( + id TEXT PRIMARY KEY NOT NULL, + target_type TEXT NOT NULL CHECK (target_type IN ('media', 'collection', 'tag', 'saved_search')), + target_id TEXT NOT NULL, + owner_id TEXT NOT NULL, + recipient_type TEXT NOT NULL CHECK (recipient_type IN ('public_link', 'user', 'group', 'federated')), + recipient_user_id TEXT, + recipient_group_id TEXT, + recipient_federated_handle TEXT, + recipient_federated_server TEXT, + public_token TEXT UNIQUE, + public_password_hash TEXT, + perm_view INTEGER NOT NULL DEFAULT 1, + perm_download INTEGER NOT NULL DEFAULT 0, + perm_edit INTEGER NOT NULL DEFAULT 0, + perm_delete INTEGER NOT NULL DEFAULT 0, + perm_reshare INTEGER NOT NULL DEFAULT 0, + perm_add INTEGER NOT NULL DEFAULT 0, + note TEXT, + expires_at TEXT, + access_count INTEGER NOT NULL DEFAULT 0, + last_accessed TEXT, + inherit_to_children INTEGER NOT NULL DEFAULT 1, + parent_share_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + FOREIGN KEY (owner_id) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (recipient_user_id) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (parent_share_id) REFERENCES shares(id) ON DELETE CASCADE, + UNIQUE(owner_id, target_type, target_id, recipient_type, recipient_user_id) +); + +CREATE INDEX idx_shares_owner ON shares(owner_id); +CREATE INDEX idx_shares_recipient_user ON shares(recipient_user_id); +CREATE INDEX idx_shares_target ON shares(target_type, target_id); +CREATE INDEX idx_shares_token ON shares(public_token); +CREATE INDEX idx_shares_expires ON shares(expires_at); + +-- Share activity log +CREATE 
TABLE share_activity ( + id TEXT PRIMARY KEY NOT NULL, + share_id TEXT NOT NULL, + actor_id TEXT, + actor_ip TEXT, + action TEXT NOT NULL, + details TEXT, + timestamp TEXT NOT NULL, + FOREIGN KEY (share_id) REFERENCES shares(id) ON DELETE CASCADE, + FOREIGN KEY (actor_id) REFERENCES users(id) ON DELETE SET NULL +); + +CREATE INDEX idx_share_activity_share ON share_activity(share_id); +CREATE INDEX idx_share_activity_timestamp ON share_activity(timestamp); + +-- Share notifications +CREATE TABLE share_notifications ( + id TEXT PRIMARY KEY NOT NULL, + user_id TEXT NOT NULL, + share_id TEXT NOT NULL, + notification_type TEXT NOT NULL, + is_read INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL, + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (share_id) REFERENCES shares(id) ON DELETE CASCADE +); + +CREATE INDEX idx_share_notifications_user ON share_notifications(user_id); +CREATE INDEX idx_share_notifications_unread ON share_notifications(user_id, is_read) WHERE is_read = 0; + +-- Migrate existing share_links to new shares table (if share_links exists) +INSERT OR IGNORE INTO shares ( + id, target_type, target_id, owner_id, recipient_type, + public_token, public_password_hash, perm_view, perm_download, + access_count, expires_at, created_at, updated_at +) +SELECT + id, 'media', media_id, created_by, 'public_link', + token, password_hash, 1, 1, + view_count, expires_at, created_at, created_at +FROM share_links +WHERE EXISTS (SELECT 1 FROM sqlite_master WHERE type='table' AND name='share_links');