pinakes/crates/pinakes-core/src/storage/postgres.rs
NotAShelf 27be136e01
pinakes-core: add markdown link storage methods
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I5fa9fd045711664e5dcc7f1c12b6ca896a6a6964
2026-02-09 15:49:27 +03:00

6878 lines
240 KiB
Rust

use std::collections::HashMap;
use std::path::PathBuf;
use chrono::Utc;
use deadpool_postgres::{Config as PoolConfig, Pool, Runtime};
use native_tls::TlsConnector;
use postgres_native_tls::MakeTlsConnector;
use tokio_postgres::types::ToSql;
use tokio_postgres::{NoTls, Row};
use uuid::Uuid;
use crate::config::PostgresConfig;
use crate::error::{PinakesError, Result};
use crate::media_type::MediaType;
use crate::model::*;
use crate::search::*;
use crate::storage::StorageBackend;
/// PostgreSQL-backed implementation of the storage layer.
///
/// Holds a deadpool connection pool; every operation checks a client out
/// of the pool for the duration of the call.
pub struct PostgresBackend {
    // Shared connection pool (created in `PostgresBackend::new`).
    pool: Pool,
}
impl PostgresBackend {
    /// Creates a new backend by building a connection pool from `config`
    /// and verifying that at least one connection can be checked out.
    ///
    /// When `config.tls_enabled` is set, a native-tls connector is built,
    /// optionally trusting a custom CA certificate; setting
    /// `tls_verify_ca = false` disables certificate verification (insecure,
    /// loudly warned about).
    ///
    /// # Errors
    /// Returns `PinakesError::Config` for unreadable/unparsable CA
    /// certificates and `PinakesError::Database` for TLS-connector, pool
    /// construction, or connection failures.
    pub async fn new(config: &PostgresConfig) -> Result<Self> {
        let mut pool_config = PoolConfig::new();
        pool_config.host = Some(config.host.clone());
        pool_config.port = Some(config.port);
        pool_config.dbname = Some(config.database.clone());
        pool_config.user = Some(config.username.clone());
        pool_config.password = Some(config.password.clone());

        // Build the pool in whichever TLS mode is configured; connectivity
        // verification below is shared (it was previously duplicated in
        // both branches).
        let pool = if config.tls_enabled {
            // Build TLS connector
            let mut tls_builder = TlsConnector::builder();
            // Load custom CA certificate if provided
            if let Some(ref ca_cert_path) = config.tls_ca_cert_path {
                let cert_bytes = std::fs::read(ca_cert_path).map_err(|e| {
                    PinakesError::Config(format!(
                        "failed to read CA certificate file {}: {e}",
                        ca_cert_path.display()
                    ))
                })?;
                let cert = native_tls::Certificate::from_pem(&cert_bytes).map_err(|e| {
                    PinakesError::Config(format!(
                        "failed to parse CA certificate {}: {e}",
                        ca_cert_path.display()
                    ))
                })?;
                tls_builder.add_root_certificate(cert);
            }
            // Configure certificate validation
            if !config.tls_verify_ca {
                tracing::warn!(
                    "PostgreSQL TLS certificate verification disabled - this is insecure!"
                );
                tls_builder.danger_accept_invalid_certs(true);
            }
            let connector = tls_builder.build().map_err(|e| {
                PinakesError::Database(format!("failed to build TLS connector: {e}"))
            })?;
            let tls = MakeTlsConnector::new(connector);
            pool_config
                .create_pool(Some(Runtime::Tokio1), tls)
                .map_err(|e| {
                    PinakesError::Database(format!("failed to create connection pool: {e}"))
                })?
        } else {
            tracing::warn!(
                "PostgreSQL TLS is disabled - connection is unencrypted. \
                 Set postgres.tls_enabled = true to enable encryption."
            );
            pool_config
                .create_pool(Some(Runtime::Tokio1), NoTls)
                .map_err(|e| {
                    PinakesError::Database(format!("failed to create connection pool: {e}"))
                })?
        };

        // Verify connectivity once, regardless of TLS mode.
        let _ = pool.get().await.map_err(|e| {
            PinakesError::Database(format!("failed to connect to postgres: {e}"))
        })?;
        if config.tls_enabled {
            tracing::info!("PostgreSQL connection established with TLS");
        }
        Ok(Self { pool })
    }
}
/// Serializes a `MediaType` to its canonical (serde-renamed) string form,
/// falling back to a lowercased Debug rendering for non-string values.
fn media_type_to_string(mt: &MediaType) -> String {
    match serde_json::to_value(mt) {
        Ok(serde_json::Value::String(s)) => s,
        _ => format!("{mt:?}").to_lowercase(),
    }
}

/// Parses a `MediaType` from its canonical string form.
fn media_type_from_string(s: &str) -> Result<MediaType> {
    let value = serde_json::Value::String(s.to_string());
    serde_json::from_value(value)
        .map_err(|_| PinakesError::Database(format!("unknown media type: {s}")))
}

/// Serializes an `AuditAction` to its canonical string form.
// AuditAction uses serde rename_all = "snake_case"
fn audit_action_to_string(action: &AuditAction) -> String {
    match serde_json::to_value(action) {
        Ok(serde_json::Value::String(s)) => s,
        _ => format!("{action}"),
    }
}

/// Parses an `AuditAction` from its canonical string form.
fn audit_action_from_string(s: &str) -> Result<AuditAction> {
    let value = serde_json::Value::String(s.to_string());
    serde_json::from_value(value)
        .map_err(|_| PinakesError::Database(format!("unknown audit action: {s}")))
}

/// Serializes a `CollectionKind` to its canonical string form.
fn collection_kind_to_string(kind: &CollectionKind) -> String {
    match serde_json::to_value(kind) {
        Ok(serde_json::Value::String(s)) => s,
        _ => format!("{kind:?}").to_lowercase(),
    }
}

/// Parses a `CollectionKind` from its canonical string form.
fn collection_kind_from_string(s: &str) -> Result<CollectionKind> {
    let value = serde_json::Value::String(s.to_string());
    serde_json::from_value(value)
        .map_err(|_| PinakesError::Database(format!("unknown collection kind: {s}")))
}

/// Serializes a `CustomFieldType` to its canonical string form.
fn custom_field_type_to_string(ft: &CustomFieldType) -> String {
    match serde_json::to_value(ft) {
        Ok(serde_json::Value::String(s)) => s,
        _ => format!("{ft:?}").to_lowercase(),
    }
}

/// Parses a `CustomFieldType` from its canonical string form.
fn custom_field_type_from_string(s: &str) -> Result<CustomFieldType> {
    let value = serde_json::Value::String(s.to_string());
    serde_json::from_value(value)
        .map_err(|_| PinakesError::Database(format!("unknown custom field type: {s}")))
}

/// Parses a storage-mode string; anything other than "managed" is
/// treated as external storage.
fn storage_mode_from_string(s: &str) -> StorageMode {
    if s == "managed" {
        StorageMode::Managed
    } else {
        StorageMode::External
    }
}
/// Maps a `media_items` row onto a `MediaItem`.
///
/// NOTE(review): every column read with `row.get` below MUST appear in the
/// calling query's SELECT list — tokio_postgres `Row::get` panics on a
/// missing column. Only `deleted_at` and `links_extracted_at` are read
/// leniently via `try_get`, so queries lacking those two columns still map
/// (the fields default to `None`).
///
/// Custom fields are NOT loaded here; callers populate `custom_fields`
/// separately (it starts out empty).
fn row_to_media_item(row: &Row) -> Result<MediaItem> {
    let media_type_str: String = row.get("media_type");
    let media_type = media_type_from_string(&media_type_str)?;
    let storage_mode_str: String = row.get("storage_mode");
    let storage_mode = storage_mode_from_string(&storage_mode_str);
    Ok(MediaItem {
        id: MediaId(row.get("id")),
        path: PathBuf::from(row.get::<_, String>("path")),
        file_name: row.get("file_name"),
        media_type,
        content_hash: ContentHash(row.get("content_hash")),
        // Stored as BIGINT; negative sizes are not expected, so the cast
        // back to u64 is a straight reinterpretation.
        file_size: row.get::<_, i64>("file_size") as u64,
        title: row.get("title"),
        artist: row.get("artist"),
        album: row.get("album"),
        genre: row.get("genre"),
        year: row.get("year"),
        duration_secs: row.get("duration_secs"),
        description: row.get("description"),
        thumbnail_path: row
            .get::<_, Option<String>>("thumbnail_path")
            .map(PathBuf::from),
        // Populated by callers after the row is mapped.
        custom_fields: HashMap::new(),
        file_mtime: row.get("file_mtime"),
        // Photo-specific fields
        date_taken: row.get("date_taken"),
        latitude: row.get("latitude"),
        longitude: row.get("longitude"),
        camera_make: row.get("camera_make"),
        camera_model: row.get("camera_model"),
        rating: row.get("rating"),
        perceptual_hash: row.get("perceptual_hash"),
        // Managed storage fields
        storage_mode,
        original_filename: row.get("original_filename"),
        uploaded_at: row.get("uploaded_at"),
        storage_key: row.get("storage_key"),
        created_at: row.get("created_at"),
        updated_at: row.get("updated_at"),
        // Trash support (optional column; lenient read)
        deleted_at: row.try_get("deleted_at").ok().flatten(),
        // Markdown links extraction timestamp (optional column; lenient read)
        links_extracted_at: row.try_get("links_extracted_at").ok().flatten(),
    })
}
/// Maps a `tags` row onto a `Tag`. Requires columns: id, name, parent_id,
/// created_at.
fn row_to_tag(row: &Row) -> Result<Tag> {
    Ok(Tag {
        id: row.get("id"),
        name: row.get("name"),
        parent_id: row.get("parent_id"),
        created_at: row.get("created_at"),
    })
}

/// Maps a `collections` row onto a `Collection`; the textual `kind` column
/// is decoded back into a `CollectionKind`.
fn row_to_collection(row: &Row) -> Result<Collection> {
    let kind_str: String = row.get("kind");
    let kind = collection_kind_from_string(&kind_str)?;
    Ok(Collection {
        id: row.get("id"),
        name: row.get("name"),
        description: row.get("description"),
        kind,
        filter_query: row.get("filter_query"),
        created_at: row.get("created_at"),
        updated_at: row.get("updated_at"),
    })
}

/// Maps an `audit_log` row onto an `AuditEntry`; `media_id` is nullable
/// (entries may not be tied to a specific item).
fn row_to_audit_entry(row: &Row) -> Result<AuditEntry> {
    let action_str: String = row.get("action");
    let action = audit_action_from_string(&action_str)?;
    let media_id: Option<Uuid> = row.get("media_id");
    Ok(AuditEntry {
        id: row.get("id"),
        media_id: media_id.map(MediaId),
        action,
        details: row.get("details"),
        timestamp: row.get("timestamp"),
    })
}
/// Recursively builds a WHERE-clause fragment and collects bind
/// parameters for a `SearchQuery`.
///
/// `param_offset` and `params` are in/out arguments: parameter values are
/// appended to `params` and `param_offset` (the 1-based `$N` placeholder
/// index) is advanced in place as placeholders are emitted.
///
/// Returns a tuple of:
/// - the SQL fragment (may include `$N` placeholders),
/// - collected TypeFilter values to append as extra WHERE clauses,
/// - collected TagFilter values to append as extra WHERE clauses.
fn build_search_clause(
    query: &SearchQuery,
    param_offset: &mut i32,
    params: &mut Vec<Box<dyn ToSql + Sync + Send>>,
) -> Result<(String, Vec<String>, Vec<String>)> {
    let mut type_filters = Vec::new();
    let mut tag_filters = Vec::new();
    let fragment = build_search_inner(
        query,
        param_offset,
        params,
        &mut type_filters,
        &mut tag_filters,
    )?;
    Ok((fragment, type_filters, tag_filters))
}
/// Recursive worker for `build_search_clause`: translates one
/// `SearchQuery` node into a boolean SQL fragment.
///
/// Invariant maintained throughout: every `$N` placeholder emitted uses
/// the current value of `offset`, and exactly one value is pushed onto
/// `params` per consumed index, in the same order. Type and tag filters
/// are not inlined into the fragment; they are collected into the out
/// vectors (the node itself contributes "TRUE") so the caller can apply
/// them as separate WHERE clauses.
fn build_search_inner(
    query: &SearchQuery,
    offset: &mut i32,
    params: &mut Vec<Box<dyn ToSql + Sync + Send>>,
    type_filters: &mut Vec<String>,
    tag_filters: &mut Vec<String>,
) -> Result<String> {
    match query {
        SearchQuery::FullText(text) => {
            if text.is_empty() {
                return Ok("TRUE".to_string());
            }
            // Combine FTS with trigram similarity and ILIKE for comprehensive fuzzy matching
            // This allows partial matches like "mus" -> "music"
            // Reserve seven consecutive placeholder indices up front.
            let idx_fts = *offset;
            *offset += 1;
            let idx_prefix = *offset;
            *offset += 1;
            let idx_ilike = *offset;
            *offset += 1;
            let idx_sim_title = *offset;
            *offset += 1;
            let idx_sim_artist = *offset;
            *offset += 1;
            let idx_sim_album = *offset;
            *offset += 1;
            let idx_sim_filename = *offset;
            *offset += 1;
            // Sanitize for tsquery prefix matching
            // NOTE(review): if `text` consists only of these stripped
            // characters, `sanitized` is empty and the resulting ":*"
            // tsquery is invalid, failing at query time — consider guarding.
            let sanitized = text.replace(['&', '|', '!', '(', ')', ':', '*', '\\', '\''], "");
            let prefix_query = if sanitized.contains(' ') {
                // For multi-word, join with & and add :* to last word
                let words: Vec<&str> = sanitized.split_whitespace().collect();
                if let Some((last, rest)) = words.split_last() {
                    let prefix_parts: Vec<String> = rest.iter().map(|w| w.to_string()).collect();
                    if prefix_parts.is_empty() {
                        format!("{}:*", last)
                    } else {
                        format!("{} & {}:*", prefix_parts.join(" & "), last)
                    }
                } else {
                    format!("{}:*", sanitized)
                }
            } else {
                format!("{}:*", sanitized)
            };
            // Pushed in the same order the indices were reserved above.
            params.push(Box::new(text.clone()));
            params.push(Box::new(prefix_query));
            params.push(Box::new(format!("%{}%", text)));
            params.push(Box::new(text.clone()));
            params.push(Box::new(text.clone()));
            params.push(Box::new(text.clone()));
            params.push(Box::new(text.clone()));
            // `idx_ilike` is intentionally reused for both title and
            // file_name LIKE comparisons (same bound value).
            Ok(format!(
                "(\
                search_vector @@ plainto_tsquery('english', ${idx_fts}) OR \
                search_vector @@ to_tsquery('english', ${idx_prefix}) OR \
                LOWER(COALESCE(title, '')) LIKE LOWER(${idx_ilike}) OR \
                LOWER(COALESCE(file_name, '')) LIKE LOWER(${idx_ilike}) OR \
                similarity(COALESCE(title, ''), ${idx_sim_title}) > 0.3 OR \
                similarity(COALESCE(artist, ''), ${idx_sim_artist}) > 0.3 OR \
                similarity(COALESCE(album, ''), ${idx_sim_album}) > 0.3 OR \
                similarity(COALESCE(file_name, ''), ${idx_sim_filename}) > 0.25\
                )"
            ))
        }
        SearchQuery::Prefix(term) => {
            let idx = *offset;
            *offset += 1;
            // Sanitize by stripping special tsquery characters
            // NOTE(review): same empty-term caveat as FullText above.
            let sanitized = term.replace(['&', '|', '!', '(', ')', ':', '*', '\\', '\''], "");
            params.push(Box::new(format!("{sanitized}:*")));
            Ok(format!("search_vector @@ to_tsquery('english', ${idx})"))
        }
        SearchQuery::Fuzzy(term) => {
            // Use trigram similarity on multiple fields
            let idx_title = *offset;
            *offset += 1;
            let idx_artist = *offset;
            *offset += 1;
            let idx_album = *offset;
            *offset += 1;
            let idx_filename = *offset;
            *offset += 1;
            let idx_ilike = *offset;
            *offset += 1;
            params.push(Box::new(term.clone()));
            params.push(Box::new(term.clone()));
            params.push(Box::new(term.clone()));
            params.push(Box::new(term.clone()));
            params.push(Box::new(format!("%{}%", term)));
            Ok(format!(
                "(\
                similarity(COALESCE(title, ''), ${idx_title}) > 0.3 OR \
                similarity(COALESCE(artist, ''), ${idx_artist}) > 0.3 OR \
                similarity(COALESCE(album, ''), ${idx_album}) > 0.3 OR \
                similarity(COALESCE(file_name, ''), ${idx_filename}) > 0.25 OR \
                LOWER(COALESCE(title, '')) LIKE LOWER(${idx_ilike}) OR \
                LOWER(COALESCE(file_name, '')) LIKE LOWER(${idx_ilike})\
                )"
            ))
        }
        SearchQuery::FieldMatch { field, value } => {
            let idx = *offset;
            *offset += 1;
            params.push(Box::new(value.clone()));
            // Whitelist of searchable columns; anything else is a parse
            // error (never interpolate `field` directly into SQL).
            let col = match field.as_str() {
                "title" => "title",
                "artist" => "artist",
                "album" => "album",
                "genre" => "genre",
                "file_name" => "file_name",
                "description" => "description",
                _ => {
                    return Err(PinakesError::SearchParse(format!("unknown field: {field}")));
                }
            };
            Ok(format!("LOWER(COALESCE({col}, '')) = LOWER(${idx})"))
        }
        SearchQuery::TypeFilter(type_val) => {
            // Collected out-of-band; the node itself matches everything.
            type_filters.push(type_val.clone());
            Ok("TRUE".to_string())
        }
        SearchQuery::TagFilter(tag_name) => {
            // Collected out-of-band; the node itself matches everything.
            tag_filters.push(tag_name.clone());
            Ok("TRUE".to_string())
        }
        SearchQuery::And(children) => {
            let mut parts = Vec::new();
            for child in children {
                let frag = build_search_inner(child, offset, params, type_filters, tag_filters)?;
                parts.push(frag);
            }
            if parts.is_empty() {
                Ok("TRUE".to_string())
            } else {
                Ok(format!("({})", parts.join(" AND ")))
            }
        }
        SearchQuery::Or(children) => {
            let mut parts = Vec::new();
            for child in children {
                let frag = build_search_inner(child, offset, params, type_filters, tag_filters)?;
                parts.push(frag);
            }
            if parts.is_empty() {
                Ok("TRUE".to_string())
            } else {
                Ok(format!("({})", parts.join(" OR ")))
            }
        }
        SearchQuery::Not(inner) => {
            let frag = build_search_inner(inner, offset, params, type_filters, tag_filters)?;
            Ok(format!("NOT ({frag})"))
        }
        SearchQuery::RangeQuery { field, start, end } => {
            // Whitelisted numeric columns only.
            let col = match field.as_str() {
                "year" => "year",
                "size" | "file_size" => "file_size",
                "duration" => "duration_secs",
                _ => return Ok("TRUE".to_string()), // Unknown field, ignore
            };
            match (start, end) {
                (Some(s), Some(e)) => {
                    let idx_start = *offset;
                    *offset += 1;
                    let idx_end = *offset;
                    *offset += 1;
                    params.push(Box::new(*s));
                    params.push(Box::new(*e));
                    Ok(format!("({col} >= ${idx_start} AND {col} <= ${idx_end})"))
                }
                (Some(s), None) => {
                    let idx = *offset;
                    *offset += 1;
                    params.push(Box::new(*s));
                    Ok(format!("{col} >= ${idx}"))
                }
                (None, Some(e)) => {
                    let idx = *offset;
                    *offset += 1;
                    params.push(Box::new(*e));
                    Ok(format!("{col} <= ${idx}"))
                }
                (None, None) => Ok("TRUE".to_string()),
            }
        }
        SearchQuery::CompareQuery { field, op, value } => {
            let col = match field.as_str() {
                "year" => "year",
                "size" | "file_size" => "file_size",
                "duration" => "duration_secs",
                _ => return Ok("TRUE".to_string()), // Unknown field, ignore
            };
            let op_sql = match op {
                crate::search::CompareOp::GreaterThan => ">",
                crate::search::CompareOp::GreaterOrEqual => ">=",
                crate::search::CompareOp::LessThan => "<",
                crate::search::CompareOp::LessOrEqual => "<=",
            };
            let idx = *offset;
            *offset += 1;
            params.push(Box::new(*value));
            Ok(format!("{col} {op_sql} ${idx}"))
        }
        SearchQuery::DateQuery { field, value } => {
            let col = match field.as_str() {
                "created" => "created_at",
                "modified" | "updated" => "updated_at",
                _ => return Ok("TRUE".to_string()),
            };
            Ok(date_value_to_postgres_expr(col, value))
        }
    }
}
/// Converts a `DateValue` to a PostgreSQL datetime comparison expression.
///
/// Compound predicates (LastWeek/LastMonth/LastYear) are parenthesized so
/// the fragment can be safely embedded in any boolean context (`NOT`,
/// `OR` joins) without relying on SQL operator precedence.
fn date_value_to_postgres_expr(col: &str, value: &crate::search::DateValue) -> String {
    use crate::search::DateValue;
    match value {
        DateValue::Today => format!("{col}::date = CURRENT_DATE"),
        DateValue::Yesterday => format!("{col}::date = CURRENT_DATE - INTERVAL '1 day'"),
        DateValue::ThisWeek => format!("{col} >= date_trunc('week', CURRENT_DATE)"),
        DateValue::LastWeek => format!(
            "({col} >= date_trunc('week', CURRENT_DATE) - INTERVAL '7 days' AND {col} < date_trunc('week', CURRENT_DATE))"
        ),
        DateValue::ThisMonth => format!("{col} >= date_trunc('month', CURRENT_DATE)"),
        DateValue::LastMonth => format!(
            "({col} >= date_trunc('month', CURRENT_DATE) - INTERVAL '1 month' AND {col} < date_trunc('month', CURRENT_DATE))"
        ),
        DateValue::ThisYear => format!("{col} >= date_trunc('year', CURRENT_DATE)"),
        DateValue::LastYear => format!(
            "({col} >= date_trunc('year', CURRENT_DATE) - INTERVAL '1 year' AND {col} < date_trunc('year', CURRENT_DATE))"
        ),
        // `days` is interpolated into the SQL text directly; assumed to be
        // a numeric enum payload (not attacker-controlled text) — TODO
        // confirm against the DateValue definition.
        DateValue::DaysAgo(days) => format!("{col} >= CURRENT_DATE - INTERVAL '{days} days'"),
    }
}
/// Maps a `SortOrder` to a static ORDER BY fragment. `Relevance` falls
/// back to newest-first when no FTS rank is available.
fn sort_order_clause(sort: &SortOrder) -> &'static str {
    match sort {
        // Relevance without an FTS rank degrades to newest-first.
        SortOrder::Relevance | SortOrder::DateDesc => "created_at DESC",
        SortOrder::DateAsc => "created_at ASC",
        SortOrder::NameAsc => "file_name ASC",
        SortOrder::NameDesc => "file_name DESC",
        SortOrder::SizeAsc => "file_size ASC",
        SortOrder::SizeDesc => "file_size DESC",
    }
}

/// Returns a relevance-aware ORDER BY when there's an active FTS query.
fn sort_order_clause_with_rank(sort: &SortOrder, has_fts: bool) -> String {
    if has_fts && matches!(sort, SortOrder::Relevance) {
        "ts_rank(search_vector, query) DESC".to_string()
    } else {
        sort_order_clause(sort).to_string()
    }
}
#[async_trait::async_trait]
impl StorageBackend for PostgresBackend {
/// Applies pending schema migrations using a connection checked out of
/// the pool.
async fn run_migrations(&self) -> Result<()> {
    let mut obj = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // deadpool_postgres::Object derefs to tokio_postgres::Client,
    // but refinery needs &mut Client. We can get the inner client.
    let client: &mut tokio_postgres::Client = obj.as_mut();
    crate::storage::migrations::run_postgres_migrations(client).await
}
// ---- Root directories ----
async fn add_root_dir(&self, path: PathBuf) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"INSERT INTO root_dirs (path) VALUES ($1) ON CONFLICT (path) DO NOTHING",
&[&path.to_string_lossy().as_ref()],
)
.await?;
Ok(())
}
async fn list_root_dirs(&self) -> Result<Vec<PathBuf>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query("SELECT path FROM root_dirs ORDER BY path", &[])
.await?;
Ok(rows
.iter()
.map(|r| PathBuf::from(r.get::<_, String>(0)))
.collect())
}
async fn remove_root_dir(&self, path: &std::path::Path) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM root_dirs WHERE path = $1",
&[&path.to_string_lossy().as_ref()],
)
.await?;
Ok(())
}
// ---- Media CRUD ----
/// Inserts a media item together with its custom fields.
///
/// Runs inside a transaction so the item row and its custom-field rows
/// commit atomically — previously a failure while inserting custom
/// fields left a partially-written item behind.
async fn insert_media(&self, item: &MediaItem) -> Result<()> {
    let mut client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Dropping `tx` without commit rolls everything back.
    let tx = client.transaction().await?;
    let media_type_str = media_type_to_string(&item.media_type);
    let path_str = item.path.to_string_lossy().to_string();
    // Stored as BIGINT; cast mirrors the signed column type.
    let file_size = item.file_size as i64;
    tx.execute(
        "INSERT INTO media_items (
            id, path, file_name, media_type, content_hash, file_size,
            title, artist, album, genre, year, duration_secs, description,
            thumbnail_path, date_taken, latitude, longitude, camera_make,
            camera_model, rating, perceptual_hash, created_at, updated_at
        ) VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23
        )",
        &[
            &item.id.0,
            &path_str,
            &item.file_name,
            &media_type_str,
            &item.content_hash.0,
            &file_size,
            &item.title,
            &item.artist,
            &item.album,
            &item.genre,
            &item.year,
            &item.duration_secs,
            &item.description,
            &item
                .thumbnail_path
                .as_ref()
                .map(|p| p.to_string_lossy().to_string()),
            &item.date_taken,
            &item.latitude,
            &item.longitude,
            &item.camera_make,
            &item.camera_model,
            &item.rating,
            &item.perceptual_hash,
            &item.created_at,
            &item.updated_at,
        ],
    )
    .await?;
    // Insert custom fields within the same transaction.
    for (name, field) in &item.custom_fields {
        let ft = custom_field_type_to_string(&field.field_type);
        tx.execute(
            "INSERT INTO custom_fields (media_id, field_name, field_type, field_value)
             VALUES ($1, $2, $3, $4)
             ON CONFLICT (media_id, field_name) DO UPDATE
             SET field_type = EXCLUDED.field_type, field_value = EXCLUDED.field_value",
            &[&item.id.0, &name, &ft, &field.value],
        )
        .await?;
    }
    tx.commit().await?;
    Ok(())
}
async fn count_media(&self) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
let row = client
.query_one(
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL",
&[],
)
.await?;
let count: i64 = row.get(0);
Ok(count as u64)
}
/// Fetches a media item by id, including its custom fields.
///
/// The SELECT list must cover every column `row_to_media_item` reads via
/// `Row::get` (which panics on a missing column). The managed-storage
/// columns (`storage_mode`, `original_filename`, `uploaded_at`,
/// `storage_key`) were previously missing here, causing a panic when
/// mapping the row.
async fn get_media(&self, id: MediaId) -> Result<MediaItem> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let row = client
        .query_opt(
            "SELECT id, path, file_name, media_type, content_hash, file_size,
                    title, artist, album, genre, year, duration_secs, description,
                    thumbnail_path, file_mtime, date_taken, latitude, longitude,
                    camera_make, camera_model, rating, perceptual_hash,
                    storage_mode, original_filename, uploaded_at, storage_key,
                    created_at, updated_at, deleted_at, links_extracted_at
             FROM media_items WHERE id = $1",
            &[&id.0],
        )
        .await?
        .ok_or_else(|| PinakesError::NotFound(format!("media item {id}")))?;
    let mut item = row_to_media_item(&row)?;
    item.custom_fields = self.get_custom_fields(id).await?;
    Ok(item)
}
/// Looks up a media item by content hash, including custom fields.
///
/// The SELECT list covers every column `row_to_media_item` reads via the
/// panicking `Row::get`; the managed-storage columns were previously
/// missing and caused a panic when mapping the row.
async fn get_media_by_hash(&self, hash: &ContentHash) -> Result<Option<MediaItem>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let row = client
        .query_opt(
            "SELECT id, path, file_name, media_type, content_hash, file_size,
                    title, artist, album, genre, year, duration_secs, description,
                    thumbnail_path, file_mtime, date_taken, latitude, longitude,
                    camera_make, camera_model, rating, perceptual_hash,
                    storage_mode, original_filename, uploaded_at, storage_key,
                    created_at, updated_at, deleted_at, links_extracted_at
             FROM media_items WHERE content_hash = $1",
            &[&hash.0],
        )
        .await?;
    match row {
        Some(r) => {
            let mut item = row_to_media_item(&r)?;
            item.custom_fields = self.get_custom_fields(item.id).await?;
            Ok(Some(item))
        }
        None => Ok(None),
    }
}
/// Looks up a media item by filesystem path, including custom fields.
///
/// The SELECT list covers every column `row_to_media_item` reads via the
/// panicking `Row::get`; the managed-storage columns were previously
/// missing and caused a panic when mapping the row.
async fn get_media_by_path(&self, path: &std::path::Path) -> Result<Option<MediaItem>> {
    let path_str = path.to_string_lossy().to_string();
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let row = client
        .query_opt(
            "SELECT id, path, file_name, media_type, content_hash, file_size,
                    title, artist, album, genre, year, duration_secs, description,
                    thumbnail_path, file_mtime, date_taken, latitude, longitude,
                    camera_make, camera_model, rating, perceptual_hash,
                    storage_mode, original_filename, uploaded_at, storage_key,
                    created_at, updated_at, deleted_at, links_extracted_at
             FROM media_items WHERE path = $1",
            &[&path_str],
        )
        .await?;
    match row {
        Some(r) => {
            let mut item = row_to_media_item(&r)?;
            item.custom_fields = self.get_custom_fields(item.id).await?;
            Ok(Some(item))
        }
        None => Ok(None),
    }
}
/// Lists non-trashed media items, paginated and sorted.
///
/// `pagination.sort` selects one of a fixed set of ORDER BY fragments
/// (never interpolating user text into SQL); unknown values fall back to
/// newest-first. Custom fields are batch-loaded in a single follow-up
/// query instead of one query per item.
///
/// NOTE(review): the SELECT omits `links_extracted_at`, so the lenient
/// `try_get` in `row_to_media_item` leaves that field `None` in listings
/// — confirm whether listings are expected to carry it.
async fn list_media(&self, pagination: &Pagination) -> Result<Vec<MediaItem>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Whitelisted ORDER BY fragments keyed by the sort token.
    let order_by = match pagination.sort.as_deref() {
        Some("created_at_asc") => "created_at ASC",
        Some("name_asc") => "file_name ASC",
        Some("name_desc") => "file_name DESC",
        Some("size_asc") => "file_size ASC",
        Some("size_desc") => "file_size DESC",
        Some("type_asc") => "media_type ASC",
        Some("type_desc") => "media_type DESC",
        // "created_at_desc" or any unrecognized value falls back to default
        _ => "created_at DESC",
    };
    let sql = format!(
        "SELECT id, path, file_name, media_type, content_hash, file_size,
            title, artist, album, genre, year, duration_secs, description,
            thumbnail_path, file_mtime, date_taken, latitude, longitude,
            camera_make, camera_model, rating, perceptual_hash,
            storage_mode, original_filename, uploaded_at, storage_key,
            created_at, updated_at, deleted_at
         FROM media_items
         WHERE deleted_at IS NULL
         ORDER BY {order_by}
         LIMIT $1 OFFSET $2"
    );
    let rows = client
        .query(
            &sql,
            &[&(pagination.limit as i64), &(pagination.offset as i64)],
        )
        .await?;
    let mut items = Vec::with_capacity(rows.len());
    for row in &rows {
        let item = row_to_media_item(row)?;
        items.push(item);
    }
    // Batch-load custom fields for all items
    if !items.is_empty() {
        let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
        let cf_rows = client
            .query(
                "SELECT media_id, field_name, field_type, field_value
                 FROM custom_fields WHERE media_id = ANY($1)",
                &[&ids],
            )
            .await?;
        // Group fetched fields by owning media id, then hand each group
        // to its item.
        let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
        for row in &cf_rows {
            let mid: Uuid = row.get("media_id");
            let name: String = row.get("field_name");
            let ft_str: String = row.get("field_type");
            let value: String = row.get("field_value");
            let field_type = custom_field_type_from_string(&ft_str)?;
            cf_map
                .entry(mid)
                .or_default()
                .insert(name, CustomField { field_type, value });
        }
        for item in &mut items {
            if let Some(fields) = cf_map.remove(&item.id.0) {
                item.custom_fields = fields;
            }
        }
    }
    Ok(items)
}
/// Updates a media item's row and replaces its custom fields.
///
/// Runs inside a transaction: the custom-field replacement is
/// delete-then-reinsert, and without a transaction a failure between the
/// two steps silently dropped all of the item's custom fields. The
/// transaction also makes the row update and field replacement atomic.
///
/// # Errors
/// Returns `PinakesError::NotFound` when no row matches `item.id`
/// (the transaction rolls back on drop in that case).
async fn update_media(&self, item: &MediaItem) -> Result<()> {
    let mut client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let tx = client.transaction().await?;
    let media_type_str = media_type_to_string(&item.media_type);
    let path_str = item.path.to_string_lossy().to_string();
    let file_size = item.file_size as i64;
    let rows_affected = tx
        .execute(
            "UPDATE media_items SET
                path = $2, file_name = $3, media_type = $4, content_hash = $5,
                file_size = $6, title = $7, artist = $8, album = $9, genre = $10,
                year = $11, duration_secs = $12, description = $13,
                thumbnail_path = $14, date_taken = $15, latitude = $16, longitude = $17,
                camera_make = $18, camera_model = $19, rating = $20, perceptual_hash = $21, updated_at = $22
             WHERE id = $1",
            &[
                &item.id.0,
                &path_str,
                &item.file_name,
                &media_type_str,
                &item.content_hash.0,
                &file_size,
                &item.title,
                &item.artist,
                &item.album,
                &item.genre,
                &item.year,
                &item.duration_secs,
                &item.description,
                &item
                    .thumbnail_path
                    .as_ref()
                    .map(|p| p.to_string_lossy().to_string()),
                &item.date_taken,
                &item.latitude,
                &item.longitude,
                &item.camera_make,
                &item.camera_model,
                &item.rating,
                &item.perceptual_hash,
                &item.updated_at,
            ],
        )
        .await?;
    if rows_affected == 0 {
        // Dropping `tx` here rolls back the (empty) transaction.
        return Err(PinakesError::NotFound(format!("media item {}", item.id)));
    }
    // Replace custom fields: delete all then re-insert, atomically.
    tx.execute(
        "DELETE FROM custom_fields WHERE media_id = $1",
        &[&item.id.0],
    )
    .await?;
    for (name, field) in &item.custom_fields {
        let ft = custom_field_type_to_string(&field.field_type);
        tx.execute(
            "INSERT INTO custom_fields (media_id, field_name, field_type, field_value)
             VALUES ($1, $2, $3, $4)",
            &[&item.id.0, &name, &ft, &field.value],
        )
        .await?;
    }
    tx.commit().await?;
    Ok(())
}
async fn delete_media(&self, id: MediaId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows_affected = client
.execute("DELETE FROM media_items WHERE id = $1", &[&id.0])
.await?;
if rows_affected == 0 {
return Err(PinakesError::NotFound(format!("media item {id}")));
}
Ok(())
}
/// Deletes every media item and returns how many rows were removed.
///
/// Uses the affected-row count returned by `execute` directly; the
/// previous COUNT(*)-then-DELETE pair could report a stale number when
/// rows were inserted or deleted concurrently between the two statements.
async fn delete_all_media(&self) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let deleted = client.execute("DELETE FROM media_items", &[]).await?;
    Ok(deleted)
}
// ---- Batch Operations ----
async fn batch_delete_media(&self, ids: &[MediaId]) -> Result<u64> {
if ids.is_empty() {
return Ok(0);
}
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
// Use ANY with array for efficient batch delete
let uuids: Vec<Uuid> = ids.iter().map(|id| id.0).collect();
let rows = client
.execute("DELETE FROM media_items WHERE id = ANY($1)", &[&uuids])
.await?;
Ok(rows)
}
/// Applies every tag in `tag_ids` to every item in `media_ids`.
///
/// Builds the full cross product client-side and inserts it with one
/// UNNEST statement; existing pairs are skipped via ON CONFLICT, and the
/// returned count covers only rows actually inserted. The two parallel
/// vectors are preallocated to the exact product size (previously they
/// grew by repeated reallocation on large batches).
async fn batch_tag_media(&self, media_ids: &[MediaId], tag_ids: &[Uuid]) -> Result<u64> {
    if media_ids.is_empty() || tag_ids.is_empty() {
        return Ok(0);
    }
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Use UNNEST for efficient batch insert
    let pair_count = media_ids.len() * tag_ids.len();
    let mut media_uuids = Vec::with_capacity(pair_count);
    let mut tag_uuids = Vec::with_capacity(pair_count);
    for mid in media_ids {
        for tid in tag_ids {
            media_uuids.push(mid.0);
            tag_uuids.push(*tid);
        }
    }
    let rows = client
        .execute(
            "INSERT INTO media_tags (media_id, tag_id)
             SELECT * FROM UNNEST($1::uuid[], $2::uuid[])
             ON CONFLICT DO NOTHING",
            &[&media_uuids, &tag_uuids],
        )
        .await?;
    Ok(rows)
}
// ---- Tags ----
async fn create_tag(&self, name: &str, parent_id: Option<Uuid>) -> Result<Tag> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let id = Uuid::now_v7();
let now = Utc::now();
client
.execute(
"INSERT INTO tags (id, name, parent_id, created_at) VALUES ($1, $2, $3, $4)",
&[&id, &name, &parent_id, &now],
)
.await?;
Ok(Tag {
id,
name: name.to_string(),
parent_id,
created_at: now,
})
}
async fn get_tag(&self, id: Uuid) -> Result<Tag> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let row = client
.query_opt(
"SELECT id, name, parent_id, created_at FROM tags WHERE id = $1",
&[&id],
)
.await?
.ok_or_else(|| PinakesError::TagNotFound(id.to_string()))?;
row_to_tag(&row)
}
async fn list_tags(&self) -> Result<Vec<Tag>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT id, name, parent_id, created_at FROM tags ORDER BY name",
&[],
)
.await?;
rows.iter().map(row_to_tag).collect()
}
async fn delete_tag(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows_affected = client
.execute("DELETE FROM tags WHERE id = $1", &[&id])
.await?;
if rows_affected == 0 {
return Err(PinakesError::TagNotFound(id.to_string()));
}
Ok(())
}
async fn tag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"INSERT INTO media_tags (media_id, tag_id) VALUES ($1, $2)
ON CONFLICT (media_id, tag_id) DO NOTHING",
&[&media_id.0, &tag_id],
)
.await?;
Ok(())
}
async fn untag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM media_tags WHERE media_id = $1 AND tag_id = $2",
&[&media_id.0, &tag_id],
)
.await?;
Ok(())
}
async fn get_media_tags(&self, media_id: MediaId) -> Result<Vec<Tag>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT t.id, t.name, t.parent_id, t.created_at
FROM tags t
JOIN media_tags mt ON mt.tag_id = t.id
WHERE mt.media_id = $1
ORDER BY t.name",
&[&media_id.0],
)
.await?;
rows.iter().map(row_to_tag).collect()
}
/// Returns every descendant of a tag (children, grandchildren, …) via a
/// recursive CTE, sorted by name. The root tag itself is NOT included.
async fn get_tag_descendants(&self, tag_id: Uuid) -> Result<Vec<Tag>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Anchor: direct children of $1; recursive step: children of any
    // already-collected descendant.
    let rows = client
        .query(
            "WITH RECURSIVE descendants AS (
                SELECT id, name, parent_id, created_at
                FROM tags
                WHERE parent_id = $1
                UNION ALL
                SELECT t.id, t.name, t.parent_id, t.created_at
                FROM tags t
                JOIN descendants d ON t.parent_id = d.id
            )
            SELECT id, name, parent_id, created_at FROM descendants ORDER BY name",
            &[&tag_id],
        )
        .await?;
    rows.iter().map(row_to_tag).collect()
}
// ---- Collections ----
async fn create_collection(
&self,
name: &str,
kind: CollectionKind,
description: Option<&str>,
filter_query: Option<&str>,
) -> Result<Collection> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let id = Uuid::now_v7();
let now = Utc::now();
let kind_str = collection_kind_to_string(&kind);
client
.execute(
"INSERT INTO collections (id, name, description, kind, filter_query, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7)",
&[
&id,
&name,
&description,
&kind_str,
&filter_query,
&now,
&now,
],
)
.await?;
Ok(Collection {
id,
name: name.to_string(),
description: description.map(String::from),
kind,
filter_query: filter_query.map(String::from),
created_at: now,
updated_at: now,
})
}
async fn get_collection(&self, id: Uuid) -> Result<Collection> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let row = client
.query_opt(
"SELECT id, name, description, kind, filter_query, created_at, updated_at
FROM collections WHERE id = $1",
&[&id],
)
.await?
.ok_or_else(|| PinakesError::CollectionNotFound(id.to_string()))?;
row_to_collection(&row)
}
async fn list_collections(&self) -> Result<Vec<Collection>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT id, name, description, kind, filter_query, created_at, updated_at
FROM collections ORDER BY name",
&[],
)
.await?;
rows.iter().map(row_to_collection).collect()
}
async fn delete_collection(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows_affected = client
.execute("DELETE FROM collections WHERE id = $1", &[&id])
.await?;
if rows_affected == 0 {
return Err(PinakesError::CollectionNotFound(id.to_string()));
}
Ok(())
}
    /// Adds a media item to a collection at the given position, then bumps
    /// the collection's `updated_at` timestamp.
    ///
    /// The insert is an upsert: re-adding an existing member only moves its
    /// `position`; the original `added_at` value is kept in that case.
    ///
    /// NOTE(review): the member upsert and the timestamp update are two
    /// separate statements, not a transaction — a failure between them
    /// leaves `collections.updated_at` stale. Confirm whether that is
    /// acceptable for callers.
    async fn add_to_collection(
        &self,
        collection_id: Uuid,
        media_id: MediaId,
        position: i32,
    ) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let now = Utc::now();
        client
            .execute(
                "INSERT INTO collection_members (collection_id, media_id, position, added_at)
             VALUES ($1, $2, $3, $4)
             ON CONFLICT (collection_id, media_id) DO UPDATE SET position = EXCLUDED.position",
                &[&collection_id, &media_id.0, &position, &now],
            )
            .await?;
        // Update the collection's updated_at timestamp
        client
            .execute(
                "UPDATE collections SET updated_at = $2 WHERE id = $1",
                &[&collection_id, &now],
            )
            .await?;
        Ok(())
    }
async fn remove_from_collection(&self, collection_id: Uuid, media_id: MediaId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM collection_members WHERE collection_id = $1 AND media_id = $2",
&[&collection_id, &media_id.0],
)
.await?;
let now = Utc::now();
client
.execute(
"UPDATE collections SET updated_at = $2 WHERE id = $1",
&[&collection_id, &now],
)
.await?;
Ok(())
}
async fn get_collection_members(&self, collection_id: Uuid) -> Result<Vec<MediaItem>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size,
m.title, m.artist, m.album, m.genre, m.year, m.duration_secs,
m.description, m.thumbnail_path, m.created_at, m.updated_at
FROM media_items m
JOIN collection_members cm ON cm.media_id = m.id
WHERE cm.collection_id = $1
ORDER BY cm.position ASC",
&[&collection_id],
)
.await?;
let mut items = Vec::with_capacity(rows.len());
for row in &rows {
items.push(row_to_media_item(row)?);
}
// Batch-load custom fields
if !items.is_empty() {
let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
let cf_rows = client
.query(
"SELECT media_id, field_name, field_type, field_value
FROM custom_fields WHERE media_id = ANY($1)",
&[&ids],
)
.await?;
let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
for row in &cf_rows {
let mid: Uuid = row.get("media_id");
let name: String = row.get("field_name");
let ft_str: String = row.get("field_type");
let value: String = row.get("field_value");
let field_type = custom_field_type_from_string(&ft_str)?;
cf_map
.entry(mid)
.or_default()
.insert(name, CustomField { field_type, value });
}
for item in &mut items {
if let Some(fields) = cf_map.remove(&item.id.0) {
item.custom_fields = fields;
}
}
}
Ok(items)
}
// ---- Search ----
    /// Executes a structured search over `media_items`.
    ///
    /// Builds a dynamic WHERE clause from the parsed query (via
    /// `build_search_clause`), appends media-type and tag filters, runs a
    /// COUNT first for the total, then the paginated SELECT. When the query
    /// contains a full-text condition and relevance sorting is requested,
    /// results are ordered by `ts_rank` against the first FTS parameter.
    ///
    /// Positional-parameter bookkeeping is delicate here: `param_offset`
    /// tracks the next `$n` index across clause building, extra filters, and
    /// finally LIMIT/OFFSET — the COUNT query is executed *before* the
    /// pagination params are pushed so both queries see consistent indices.
    async fn search(&self, request: &SearchRequest) -> Result<SearchResults> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let mut param_offset: i32 = 1;
        let mut params: Vec<Box<dyn ToSql + Sync + Send>> = Vec::new();
        let (where_clause, type_filters, tag_filters) =
            build_search_clause(&request.query, &mut param_offset, &mut params)?;
        // Detect whether we have an FTS condition (for rank-based sorting)
        let has_fts = query_has_fts(&request.query);
        // Build additional WHERE conditions for type and tag filters
        let mut extra_where = Vec::new();
        for tf in &type_filters {
            let idx = param_offset;
            param_offset += 1;
            params.push(Box::new(tf.clone()));
            extra_where.push(format!("m.media_type = ${idx}"));
        }
        for tg in &tag_filters {
            let idx = param_offset;
            param_offset += 1;
            params.push(Box::new(tg.clone()));
            // Tag filters are ANDed: each one requires its own EXISTS match.
            extra_where.push(format!(
                "EXISTS (SELECT 1 FROM media_tags mt JOIN tags t ON mt.tag_id = t.id WHERE mt.media_id = m.id AND t.name = ${idx})"
            ));
        }
        let full_where = if extra_where.is_empty() {
            where_clause.clone()
        } else {
            format!("{where_clause} AND {}", extra_where.join(" AND "))
        };
        let order_by = sort_order_clause_with_rank(&request.sort, has_fts);
        // For relevance sorting with FTS, we need a CTE or subquery to define 'query'
        let (count_sql, select_sql) = if has_fts && request.sort == SortOrder::Relevance {
            // Extract the FTS query parameter for ts_rank
            // We wrap the query in a CTE that exposes the tsquery
            let fts_param_idx = find_first_fts_param(&request.query);
            let count = format!("SELECT COUNT(*) FROM media_items m WHERE {full_where}");
            // LIMIT/OFFSET use the next two parameter slots; the boxed values
            // are pushed after the COUNT query below.
            let select = format!(
                "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size,
                    m.title, m.artist, m.album, m.genre, m.year, m.duration_secs,
                    m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude,
                    m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash,
                    m.created_at, m.updated_at,
                    ts_rank(m.search_vector, plainto_tsquery('english', ${fts_param_idx})) AS rank
             FROM media_items m
             WHERE {full_where}
             ORDER BY rank DESC
             LIMIT ${} OFFSET ${}",
                param_offset,
                param_offset + 1
            );
            (count, select)
        } else {
            let count = format!("SELECT COUNT(*) FROM media_items m WHERE {full_where}");
            let select = format!(
                "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size,
                    m.title, m.artist, m.album, m.genre, m.year, m.duration_secs,
                    m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude,
                    m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash,
                    m.created_at, m.updated_at
             FROM media_items m
             WHERE {full_where}
             ORDER BY {order_by}
             LIMIT ${} OFFSET ${}",
                param_offset,
                param_offset + 1
            );
            (count, select)
        };
        // Count query uses the current params (without limit/offset)
        let count_params: Vec<&(dyn ToSql + Sync)> = params
            .iter()
            .map(|p| p.as_ref() as &(dyn ToSql + Sync))
            .collect();
        let count_row = client.query_one(&count_sql, &count_params).await?;
        let total_count: i64 = count_row.get(0);
        // Add pagination params
        params.push(Box::new(request.pagination.limit as i64));
        params.push(Box::new(request.pagination.offset as i64));
        let select_params: Vec<&(dyn ToSql + Sync)> = params
            .iter()
            .map(|p| p.as_ref() as &(dyn ToSql + Sync))
            .collect();
        let rows = client.query(&select_sql, &select_params).await?;
        let mut items = Vec::with_capacity(rows.len());
        for row in &rows {
            items.push(row_to_media_item(row)?);
        }
        // Batch-load custom fields
        if !items.is_empty() {
            let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
            let cf_rows = client
                .query(
                    "SELECT media_id, field_name, field_type, field_value
                 FROM custom_fields WHERE media_id = ANY($1)",
                    &[&ids],
                )
                .await?;
            let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
            for row in &cf_rows {
                let mid: Uuid = row.get("media_id");
                let name: String = row.get("field_name");
                let ft_str: String = row.get("field_type");
                let value: String = row.get("field_value");
                let field_type = custom_field_type_from_string(&ft_str)?;
                cf_map
                    .entry(mid)
                    .or_default()
                    .insert(name, CustomField { field_type, value });
            }
            for item in &mut items {
                if let Some(fields) = cf_map.remove(&item.id.0) {
                    item.custom_fields = fields;
                }
            }
        }
        Ok(SearchResults {
            items,
            total_count: total_count as u64,
        })
    }
// ---- Audit ----
async fn record_audit(&self, entry: &AuditEntry) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let action_str = audit_action_to_string(&entry.action);
let media_id = entry.media_id.map(|m| m.0);
client
.execute(
"INSERT INTO audit_log (id, media_id, action, details, timestamp)
VALUES ($1, $2, $3, $4, $5)",
&[
&entry.id,
&media_id,
&action_str,
&entry.details,
&entry.timestamp,
],
)
.await?;
Ok(())
}
async fn list_audit_entries(
&self,
media_id: Option<MediaId>,
pagination: &Pagination,
) -> Result<Vec<AuditEntry>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = match media_id {
Some(mid) => {
client
.query(
"SELECT id, media_id, action, details, timestamp
FROM audit_log
WHERE media_id = $1
ORDER BY timestamp DESC
LIMIT $2 OFFSET $3",
&[
&mid.0,
&(pagination.limit as i64),
&(pagination.offset as i64),
],
)
.await?
}
None => {
client
.query(
"SELECT id, media_id, action, details, timestamp
FROM audit_log
ORDER BY timestamp DESC
LIMIT $1 OFFSET $2",
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
}
};
rows.iter().map(row_to_audit_entry).collect()
}
// ---- Custom fields ----
async fn set_custom_field(
&self,
media_id: MediaId,
name: &str,
field: &CustomField,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let ft = custom_field_type_to_string(&field.field_type);
client
.execute(
"INSERT INTO custom_fields (media_id, field_name, field_type, field_value)
VALUES ($1, $2, $3, $4)
ON CONFLICT (media_id, field_name) DO UPDATE
SET field_type = EXCLUDED.field_type, field_value = EXCLUDED.field_value",
&[&media_id.0, &name, &ft, &field.value],
)
.await?;
Ok(())
}
async fn get_custom_fields(&self, media_id: MediaId) -> Result<HashMap<String, CustomField>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT field_name, field_type, field_value
FROM custom_fields WHERE media_id = $1",
&[&media_id.0],
)
.await?;
let mut map = HashMap::new();
for row in &rows {
let name: String = row.get("field_name");
let ft_str: String = row.get("field_type");
let value: String = row.get("field_value");
let field_type = custom_field_type_from_string(&ft_str)?;
map.insert(name, CustomField { field_type, value });
}
Ok(map)
}
async fn delete_custom_field(&self, media_id: MediaId, name: &str) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM custom_fields WHERE media_id = $1 AND field_name = $2",
&[&media_id.0, &name],
)
.await?;
Ok(())
}
// ---- Duplicates ----
async fn find_duplicates(&self) -> Result<Vec<Vec<MediaItem>>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT * FROM media_items WHERE content_hash IN (
SELECT content_hash FROM media_items GROUP BY content_hash HAVING COUNT(*) > 1
) ORDER BY content_hash, created_at",
&[],
)
.await?;
let mut items = Vec::with_capacity(rows.len());
for row in &rows {
items.push(row_to_media_item(row)?);
}
// Batch-load custom fields
if !items.is_empty() {
let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
let cf_rows = client
.query(
"SELECT media_id, field_name, field_type, field_value
FROM custom_fields WHERE media_id = ANY($1)",
&[&ids],
)
.await?;
let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
for row in &cf_rows {
let mid: Uuid = row.get("media_id");
let name: String = row.get("field_name");
let ft_str: String = row.get("field_type");
let value: String = row.get("field_value");
let field_type = custom_field_type_from_string(&ft_str)?;
cf_map
.entry(mid)
.or_default()
.insert(name, CustomField { field_type, value });
}
for item in &mut items {
if let Some(fields) = cf_map.remove(&item.id.0) {
item.custom_fields = fields;
}
}
}
// Group by content_hash
let mut groups: Vec<Vec<MediaItem>> = Vec::new();
let mut current_hash = String::new();
for item in items {
if item.content_hash.0 != current_hash {
current_hash = item.content_hash.0.clone();
groups.push(Vec::new());
}
if let Some(group) = groups.last_mut() {
group.push(item);
}
}
Ok(groups)
}
    /// Finds groups of visually similar items by comparing stored perceptual
    /// hashes pairwise (Hamming distance <= `threshold`).
    ///
    /// Grouping is greedy: each not-yet-grouped item seeds a group and pulls
    /// in every later item within the threshold; grouped items are never
    /// reconsidered, so results depend on row order (sorted by id here).
    /// The comparison is O(n^2) over items that have a perceptual hash.
    /// Items whose stored hash fails base64 decoding are silently skipped.
    async fn find_perceptual_duplicates(&self, threshold: u32) -> Result<Vec<Vec<MediaItem>>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // Get all images with perceptual hashes
        let rows = client
            .query(
                "SELECT id, path, file_name, media_type, content_hash, file_size,
                    title, artist, album, genre, year, duration_secs, description,
                    thumbnail_path, file_mtime, date_taken, latitude, longitude,
                    camera_make, camera_model, rating, perceptual_hash, created_at, updated_at
             FROM media_items WHERE perceptual_hash IS NOT NULL ORDER BY id",
                &[],
            )
            .await?;
        let mut items = Vec::with_capacity(rows.len());
        for row in &rows {
            items.push(row_to_media_item(row)?);
        }
        // Batch-load custom fields
        if !items.is_empty() {
            let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
            let cf_rows = client
                .query(
                    "SELECT media_id, field_name, field_type, field_value
                 FROM custom_fields WHERE media_id = ANY($1)",
                    &[&ids],
                )
                .await?;
            let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
            for row in &cf_rows {
                let mid: Uuid = row.get("media_id");
                let name: String = row.get("field_name");
                let ft_str: String = row.get("field_type");
                let value: String = row.get("field_value");
                let field_type = custom_field_type_from_string(&ft_str)?;
                cf_map
                    .entry(mid)
                    .or_default()
                    .insert(name, CustomField { field_type, value });
            }
            for item in &mut items {
                if let Some(fields) = cf_map.remove(&item.id.0) {
                    item.custom_fields = fields;
                }
            }
        }
        // Compare each pair and build groups
        use image_hasher::ImageHash;
        let mut groups: Vec<Vec<MediaItem>> = Vec::new();
        let mut grouped_indices: std::collections::HashSet<usize> =
            std::collections::HashSet::new();
        for i in 0..items.len() {
            if grouped_indices.contains(&i) {
                continue;
            }
            let hash_a = match &items[i].perceptual_hash {
                Some(h) => match ImageHash::<Vec<u8>>::from_base64(h) {
                    Ok(hash) => hash,
                    // Undecodable hash: skip this item entirely.
                    Err(_) => continue,
                },
                None => continue,
            };
            let mut group = vec![items[i].clone()];
            grouped_indices.insert(i);
            // Only look at later items; earlier ones were already seeds.
            for (j, item_j) in items.iter().enumerate().skip(i + 1) {
                if grouped_indices.contains(&j) {
                    continue;
                }
                let hash_b = match &item_j.perceptual_hash {
                    Some(h) => match ImageHash::<Vec<u8>>::from_base64(h) {
                        Ok(hash) => hash,
                        Err(_) => continue,
                    },
                    None => continue,
                };
                let distance = hash_a.dist(&hash_b);
                if distance <= threshold {
                    group.push(item_j.clone());
                    grouped_indices.insert(j);
                }
            }
            // Only add groups with more than one item (actual duplicates)
            if group.len() > 1 {
                groups.push(group);
            }
        }
        Ok(groups)
    }
// ---- Database management ----
async fn database_stats(&self) -> Result<crate::storage::DatabaseStats> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let media_count: i64 = client
.query_one("SELECT COUNT(*) FROM media_items", &[])
.await?
.get(0);
let tag_count: i64 = client
.query_one("SELECT COUNT(*) FROM tags", &[])
.await?
.get(0);
let collection_count: i64 = client
.query_one("SELECT COUNT(*) FROM collections", &[])
.await?
.get(0);
let audit_count: i64 = client
.query_one("SELECT COUNT(*) FROM audit_log", &[])
.await?
.get(0);
let database_size_bytes: i64 = client
.query_one("SELECT pg_database_size(current_database())", &[])
.await?
.get(0);
Ok(crate::storage::DatabaseStats {
media_count: media_count as u64,
tag_count: tag_count as u64,
collection_count: collection_count as u64,
audit_count: audit_count as u64,
database_size_bytes: database_size_bytes as u64,
backend_name: "postgres".to_string(),
})
}
    /// Runs `VACUUM ANALYZE` over the whole database to reclaim dead tuples
    /// and refresh planner statistics.
    ///
    /// NOTE(review): VACUUM cannot run inside a transaction block; this
    /// assumes the pooled client is in autocommit mode — confirm against the
    /// pool configuration.
    async fn vacuum(&self) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        client.execute("VACUUM ANALYZE", &[]).await?;
        Ok(())
    }
    /// Truncates the library tables (media, tags, collections, memberships,
    /// custom fields, audit log) in one statement with CASCADE.
    ///
    /// NOTE(review): `users`, `user_profiles`, `user_libraries` and
    /// `saved_searches` are not listed; tables with foreign keys onto the
    /// truncated ones (ratings, comments, favorites, share links) are
    /// presumably emptied via CASCADE — confirm against the schema.
    async fn clear_all_data(&self) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        client
            .execute(
                "TRUNCATE audit_log, custom_fields, collection_members, media_tags, media_items, tags, collections CASCADE",
                &[],
            )
            .await?;
        Ok(())
    }
async fn list_media_paths(&self) -> Result<Vec<(MediaId, std::path::PathBuf, ContentHash)>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query("SELECT id, path, content_hash FROM media_items", &[])
.await?;
let mut results = Vec::with_capacity(rows.len());
for row in rows {
let id: Uuid = row.get(0);
let path: String = row.get(1);
let hash: String = row.get(2);
results.push((MediaId(id), PathBuf::from(path), ContentHash::new(hash)));
}
Ok(results)
}
async fn save_search(
&self,
id: Uuid,
name: &str,
query: &str,
sort_order: Option<&str>,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let now = Utc::now();
client
.execute(
"INSERT INTO saved_searches (id, name, query, sort_order, created_at) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (id) DO UPDATE SET name = $2, query = $3, sort_order = $4",
&[&id, &name, &query, &sort_order, &now],
)
.await?;
Ok(())
}
async fn list_saved_searches(&self) -> Result<Vec<crate::model::SavedSearch>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query("SELECT id, name, query, sort_order, created_at FROM saved_searches ORDER BY created_at DESC", &[])
.await?;
let mut results = Vec::with_capacity(rows.len());
for row in rows {
results.push(crate::model::SavedSearch {
id: row.get(0),
name: row.get(1),
query: row.get(2),
sort_order: row.get(3),
created_at: row.get(4),
});
}
Ok(results)
}
async fn delete_saved_search(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM saved_searches WHERE id = $1", &[&id])
.await?;
Ok(())
}
async fn list_media_ids_for_thumbnails(&self, only_missing: bool) -> Result<Vec<MediaId>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let sql = if only_missing {
"SELECT id FROM media_items WHERE thumbnail_path IS NULL ORDER BY created_at DESC"
} else {
"SELECT id FROM media_items ORDER BY created_at DESC"
};
let rows = client.query(sql, &[]).await?;
let ids = rows
.iter()
.map(|r| {
let id: uuid::Uuid = r.get(0);
MediaId(id)
})
.collect();
Ok(ids)
}
    /// Gathers library-wide statistics, aborting with a `Database` error if
    /// the underlying queries (in `library_statistics_inner`) exceed a
    /// 30-second budget. The inner result's own `Result` is propagated by
    /// the trailing `?` on the timeout.
    async fn library_statistics(&self) -> Result<super::LibraryStatistics> {
        tokio::time::timeout(
            std::time::Duration::from_secs(30),
            self.library_statistics_inner(),
        )
        .await
        .map_err(|_| PinakesError::Database("library_statistics query timed out".to_string()))?
    }
    /// Lists all users, newest first, attaching each user's profile.
    ///
    /// An unparseable stored role falls back to `Viewer` rather than
    /// failing the whole listing.
    ///
    /// NOTE(review): profiles are loaded with one `load_user_profile` call
    /// per user (N+1 queries) — fine for small user counts, but worth
    /// batching if user lists grow.
    async fn list_users(&self) -> Result<Vec<crate::users::User>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let rows = client
            .query("SELECT id, username, password_hash, role, created_at, updated_at FROM users ORDER BY created_at DESC", &[])
            .await?;
        let mut users = Vec::with_capacity(rows.len());
        for row in rows {
            let user_id: uuid::Uuid = row.get::<_, uuid::Uuid>(0);
            let profile = self.load_user_profile(user_id).await?;
            users.push(crate::users::User {
                id: crate::users::UserId(user_id),
                username: row.get(1),
                password_hash: row.get(2),
                role: serde_json::from_value(row.get(3)).unwrap_or(crate::config::UserRole::Viewer),
                profile,
                created_at: row.get(4),
                updated_at: row.get(5),
            });
        }
        Ok(users)
    }
async fn get_user(&self, id: crate::users::UserId) -> Result<crate::users::User> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let row = client
.query_opt("SELECT id, username, password_hash, role, created_at, updated_at FROM users WHERE id = $1", &[&id.0])
.await?
.ok_or_else(|| PinakesError::NotFound(format!("user {}", id.0)))?;
let profile = self.load_user_profile(id.0).await?;
Ok(crate::users::User {
id: crate::users::UserId(row.get::<_, uuid::Uuid>(0)),
username: row.get(1),
password_hash: row.get(2),
role: serde_json::from_value(row.get(3)).unwrap_or(crate::config::UserRole::Viewer),
profile,
created_at: row.get(4),
updated_at: row.get(5),
})
}
async fn get_user_by_username(&self, username: &str) -> Result<crate::users::User> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let row = client
.query_opt("SELECT id, username, password_hash, role, created_at, updated_at FROM users WHERE username = $1", &[&username])
.await?
.ok_or_else(|| PinakesError::NotFound(format!("user with username {}", username)))?;
let user_id: uuid::Uuid = row.get::<_, uuid::Uuid>(0);
let profile = self.load_user_profile(user_id).await?;
Ok(crate::users::User {
id: crate::users::UserId(user_id),
username: row.get(1),
password_hash: row.get(2),
role: serde_json::from_value(row.get(3)).unwrap_or(crate::config::UserRole::Viewer),
profile,
created_at: row.get(4),
updated_at: row.get(5),
})
}
    /// Creates a user row and, when a profile is supplied, a matching
    /// `user_profiles` row. Returns the constructed model; when no profile
    /// is given, a default (empty) profile is returned without a DB row.
    ///
    /// NOTE(review): the user insert and profile insert are two separate
    /// statements, not a transaction — a failure between them leaves a user
    /// without their requested profile row.
    async fn create_user(
        &self,
        username: &str,
        password_hash: &str,
        role: crate::config::UserRole,
        profile: Option<crate::users::UserProfile>,
    ) -> Result<crate::users::User> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let id = uuid::Uuid::now_v7();
        let now = chrono::Utc::now();
        // Roles are stored as JSON to match how they are read back elsewhere.
        let role_json = serde_json::to_value(role)?;
        client
            .execute(
                "INSERT INTO users (id, username, password_hash, role, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
                &[&id, &username, &password_hash, &role_json, &now, &now],
            )
            .await?;
        let user_profile = if let Some(prof) = profile.clone() {
            let prefs_json = serde_json::to_value(&prof.preferences)?;
            client
                .execute(
                    "INSERT INTO user_profiles (user_id, avatar_path, bio, preferences_json, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
                    &[&id, &prof.avatar_path, &prof.bio, &prefs_json, &now, &now],
                )
                .await?;
            prof
        } else {
            crate::users::UserProfile {
                avatar_path: None,
                bio: None,
                preferences: Default::default(),
            }
        };
        Ok(crate::users::User {
            id: crate::users::UserId(id),
            username: username.to_string(),
            password_hash: password_hash.to_string(),
            role,
            profile: user_profile,
            created_at: now,
            updated_at: now,
        })
    }
    /// Partially updates a user: password and/or role via a dynamically
    /// built UPDATE, and the profile via an upsert. Returns the re-fetched
    /// user.
    ///
    /// Placeholder bookkeeping for the dynamic UPDATE: `$1` is always
    /// `updated_at`; the password (when present) takes the next index; the
    /// role clause records `param_idx - 1` because `param_idx` is bumped
    /// *before* the clause is pushed; the final `param_idx` value is the id
    /// in the WHERE clause. The `params` vec is pushed in the same order
    /// (now, password?, role?, id), keeping values aligned with indices.
    async fn update_user(
        &self,
        id: crate::users::UserId,
        password_hash: Option<&str>,
        role: Option<crate::config::UserRole>,
        profile: Option<crate::users::UserProfile>,
    ) -> Result<crate::users::User> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let now = chrono::Utc::now();
        // Update password and/or role if provided
        if password_hash.is_some() || role.is_some() {
            let mut updates = vec!["updated_at = $1".to_string()];
            let mut param_idx = 2;
            let pw_update = if password_hash.is_some() {
                let s = format!("password_hash = ${}", param_idx);
                param_idx += 1;
                Some(s)
            } else {
                None
            };
            if let Some(ref s) = pw_update {
                updates.push(s.clone());
            }
            let role_json: Option<serde_json::Value> = if let Some(ref r) = role {
                // Bumped before the clause below, hence `param_idx - 1` there.
                param_idx += 1;
                Some(serde_json::to_value(r)?)
            } else {
                None
            };
            if role_json.is_some() {
                updates.push(format!("role = ${}", param_idx - 1));
            }
            let sql = format!(
                "UPDATE users SET {} WHERE id = ${}",
                updates.join(", "),
                param_idx
            );
            let mut params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = vec![&now];
            if let Some(ref pw) = password_hash {
                params.push(pw);
            }
            if let Some(ref rj) = role_json {
                params.push(rj);
            }
            params.push(&id.0);
            client.execute(&sql, &params).await?;
        }
        // Update profile if provided
        if let Some(prof) = profile {
            let prefs_json = serde_json::to_value(&prof.preferences)?;
            client
                .execute(
                    "INSERT INTO user_profiles (user_id, avatar_path, bio, preferences_json, created_at, updated_at)
                 VALUES ($1, $2, $3, $4, $5, $6)
                 ON CONFLICT (user_id) DO UPDATE SET avatar_path = $2, bio = $3, preferences_json = $4, updated_at = $6",
                    &[&id.0, &prof.avatar_path, &prof.bio, &prefs_json, &now, &now],
                )
                .await?;
        }
        // Fetch updated user
        self.get_user(id).await
    }
async fn delete_user(&self, id: crate::users::UserId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
// Delete profile first due to foreign key
client
.execute("DELETE FROM user_profiles WHERE user_id = $1", &[&id.0])
.await?;
// Delete library access
client
.execute("DELETE FROM user_libraries WHERE user_id = $1", &[&id.0])
.await?;
// Delete user
let affected = client
.execute("DELETE FROM users WHERE id = $1", &[&id.0])
.await?;
if affected == 0 {
return Err(PinakesError::NotFound(format!("user {}", id.0)));
}
Ok(())
}
async fn get_user_libraries(
&self,
user_id: crate::users::UserId,
) -> Result<Vec<crate::users::UserLibraryAccess>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query("SELECT user_id, root_path, permission, granted_at FROM user_libraries WHERE user_id = $1", &[&user_id.0])
.await?;
let mut libraries = Vec::with_capacity(rows.len());
for row in rows {
libraries.push(crate::users::UserLibraryAccess {
user_id: crate::users::UserId(row.get::<_, uuid::Uuid>(0)),
root_path: row.get(1),
permission: serde_json::from_value(row.get(2))
.unwrap_or(crate::users::LibraryPermission::Read),
granted_at: row.get(3),
});
}
Ok(libraries)
}
async fn grant_library_access(
&self,
user_id: crate::users::UserId,
root_path: &str,
permission: crate::users::LibraryPermission,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let perm_json = serde_json::to_value(permission)?;
let now = chrono::Utc::now();
client
.execute(
"INSERT INTO user_libraries (user_id, root_path, permission, granted_at) VALUES ($1, $2, $3, $4)
ON CONFLICT (user_id, root_path) DO UPDATE SET permission = $3, granted_at = $4",
&[&user_id.0, &root_path, &perm_json, &now],
)
.await?;
Ok(())
}
async fn revoke_library_access(
&self,
user_id: crate::users::UserId,
root_path: &str,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM user_libraries WHERE user_id = $1 AND root_path = $2",
&[&user_id.0, &root_path],
)
.await?;
Ok(())
}
// ===== Ratings =====
    /// Upserts a user's rating of a media item (one rating per user/media
    /// pair). On conflict only stars and review text are replaced; the
    /// `RETURNING` clause reports the *retained* row's id and created_at,
    /// which is why those come from the query rather than the fresh values
    /// generated here.
    async fn rate_media(
        &self,
        user_id: crate::users::UserId,
        media_id: MediaId,
        stars: u8,
        review: Option<&str>,
    ) -> Result<crate::social::Rating> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let id = Uuid::now_v7();
        let now = Utc::now();
        // Widen to i32 because u8 has no direct ToSql mapping here.
        let stars_i32 = stars as i32;
        let row = client.query_one(
            "INSERT INTO ratings (id, user_id, media_id, stars, review_text, created_at) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (user_id, media_id) DO UPDATE SET stars = $4, review_text = $5 RETURNING id, created_at",
            &[&id, &user_id.0, &media_id.0, &stars_i32, &review, &now],
        ).await?;
        let actual_id: Uuid = row.get(0);
        let actual_created_at: chrono::DateTime<Utc> = row.get(1);
        Ok(crate::social::Rating {
            id: actual_id,
            user_id,
            media_id,
            stars,
            review_text: review.map(String::from),
            created_at: actual_created_at,
        })
    }
async fn get_media_ratings(&self, media_id: MediaId) -> Result<Vec<crate::social::Rating>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT id, user_id, media_id, stars, review_text, created_at FROM ratings WHERE media_id = $1 ORDER BY created_at DESC",
&[&media_id.0],
).await?;
Ok(rows
.iter()
.map(|row| crate::social::Rating {
id: row.get("id"),
user_id: crate::users::UserId(row.get("user_id")),
media_id: MediaId(row.get("media_id")),
stars: row.get::<_, i32>("stars") as u8,
review_text: row.get("review_text"),
created_at: row.get("created_at"),
})
.collect())
}
async fn get_user_rating(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<Option<crate::social::Rating>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT id, user_id, media_id, stars, review_text, created_at FROM ratings WHERE user_id = $1 AND media_id = $2",
&[&user_id.0, &media_id.0],
).await?;
Ok(rows.first().map(|row| crate::social::Rating {
id: row.get("id"),
user_id: crate::users::UserId(row.get("user_id")),
media_id: MediaId(row.get("media_id")),
stars: row.get::<_, i32>("stars") as u8,
review_text: row.get("review_text"),
created_at: row.get("created_at"),
}))
}
async fn delete_rating(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM ratings WHERE id = $1", &[&id])
.await?;
Ok(())
}
// ===== Comments =====
async fn add_comment(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
text: &str,
parent_id: Option<Uuid>,
) -> Result<crate::social::Comment> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let id = Uuid::now_v7();
let now = Utc::now();
client.execute(
"INSERT INTO comments (id, user_id, media_id, parent_comment_id, text, created_at) VALUES ($1, $2, $3, $4, $5, $6)",
&[&id, &user_id.0, &media_id.0, &parent_id, &text, &now],
).await?;
Ok(crate::social::Comment {
id,
user_id,
media_id,
parent_comment_id: parent_id,
text: text.to_string(),
created_at: now,
})
}
async fn get_media_comments(&self, media_id: MediaId) -> Result<Vec<crate::social::Comment>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT id, user_id, media_id, parent_comment_id, text, created_at FROM comments WHERE media_id = $1 ORDER BY created_at ASC",
&[&media_id.0],
).await?;
Ok(rows
.iter()
.map(|row| crate::social::Comment {
id: row.get("id"),
user_id: crate::users::UserId(row.get("user_id")),
media_id: MediaId(row.get("media_id")),
parent_comment_id: row.get("parent_comment_id"),
text: row.get("text"),
created_at: row.get("created_at"),
})
.collect())
}
async fn delete_comment(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM comments WHERE id = $1", &[&id])
.await?;
Ok(())
}
// ===== Favorites =====
async fn add_favorite(&self, user_id: crate::users::UserId, media_id: MediaId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let now = Utc::now();
client.execute(
"INSERT INTO favorites (user_id, media_id, created_at) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING",
&[&user_id.0, &media_id.0, &now],
).await?;
Ok(())
}
async fn remove_favorite(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM favorites WHERE user_id = $1 AND media_id = $2",
&[&user_id.0, &media_id.0],
)
.await?;
Ok(())
}
    /// Returns a user's favorited media items, most recently favorited
    /// first, with limit/offset pagination and custom fields batch-loaded
    /// in a second query.
    async fn get_user_favorites(
        &self,
        user_id: crate::users::UserId,
        pagination: &Pagination,
    ) -> Result<Vec<MediaItem>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let rows = client.query(
            "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash, m.created_at, m.updated_at FROM media_items m JOIN favorites f ON m.id = f.media_id WHERE f.user_id = $1 ORDER BY f.created_at DESC LIMIT $2 OFFSET $3",
            &[&user_id.0, &(pagination.limit as i64), &(pagination.offset as i64)],
        ).await?;
        let mut items: Vec<MediaItem> = rows
            .iter()
            .map(row_to_media_item)
            .collect::<Result<Vec<_>>>()?;
        // Batch-load custom fields
        if !items.is_empty() {
            let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
            let cf_rows = client
                .query(
                    "SELECT media_id, field_name, field_type, field_value
                 FROM custom_fields WHERE media_id = ANY($1)",
                    &[&ids],
                )
                .await?;
            let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
            for row in &cf_rows {
                let mid: Uuid = row.get("media_id");
                let name: String = row.get("field_name");
                let ft_str: String = row.get("field_type");
                let value: String = row.get("field_value");
                let field_type = custom_field_type_from_string(&ft_str)?;
                cf_map
                    .entry(mid)
                    .or_default()
                    .insert(name, CustomField { field_type, value });
            }
            for item in &mut items {
                if let Some(fields) = cf_map.remove(&item.id.0) {
                    item.custom_fields = fields;
                }
            }
        }
        Ok(items)
    }
async fn is_favorite(&self, user_id: crate::users::UserId, media_id: MediaId) -> Result<bool> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let row = client
.query_one(
"SELECT COUNT(*) FROM favorites WHERE user_id = $1 AND media_id = $2",
&[&user_id.0, &media_id.0],
)
.await?;
let count: i64 = row.get(0);
Ok(count > 0)
}
// ===== Share Links =====
async fn create_share_link(
&self,
media_id: MediaId,
created_by: crate::users::UserId,
token: &str,
password_hash: Option<&str>,
expires_at: Option<chrono::DateTime<chrono::Utc>>,
) -> Result<crate::social::ShareLink> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let id = Uuid::now_v7();
let now = Utc::now();
let view_count: i32 = 0;
client.execute(
"INSERT INTO share_links (id, media_id, created_by, token, password_hash, expires_at, view_count, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)",
&[&id, &media_id.0, &created_by.0, &token, &password_hash, &expires_at, &view_count, &now],
).await?;
Ok(crate::social::ShareLink {
id,
media_id,
created_by,
token: token.to_string(),
password_hash: password_hash.map(String::from),
expires_at,
view_count: 0,
created_at: now,
})
}
async fn get_share_link(&self, token: &str) -> Result<crate::social::ShareLink> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT id, media_id, created_by, token, password_hash, expires_at, view_count, created_at FROM share_links WHERE token = $1",
&[&token],
).await?;
let row = rows
.first()
.ok_or_else(|| PinakesError::NotFound("share link not found".into()))?;
Ok(crate::social::ShareLink {
id: row.get("id"),
media_id: MediaId(row.get("media_id")),
created_by: crate::users::UserId(row.get("created_by")),
token: row.get("token"),
password_hash: row.get("password_hash"),
expires_at: row.get("expires_at"),
view_count: row.get::<_, i32>("view_count") as u64,
created_at: row.get("created_at"),
})
}
async fn increment_share_views(&self, token: &str) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"UPDATE share_links SET view_count = view_count + 1 WHERE token = $1",
&[&token],
)
.await?;
Ok(())
}
async fn delete_share_link(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM share_links WHERE id = $1", &[&id])
.await?;
Ok(())
}
// ===== Playlists =====
async fn create_playlist(
&self,
owner_id: crate::users::UserId,
name: &str,
description: Option<&str>,
is_public: bool,
is_smart: bool,
filter_query: Option<&str>,
) -> Result<crate::playlists::Playlist> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let id = Uuid::now_v7();
let now = Utc::now();
client.execute(
"INSERT INTO playlists (id, owner_id, name, description, is_public, is_smart, filter_query, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
&[&id, &owner_id.0, &name, &description, &is_public, &is_smart, &filter_query, &now, &now],
).await?;
Ok(crate::playlists::Playlist {
id,
owner_id,
name: name.to_string(),
description: description.map(String::from),
is_public,
is_smart,
filter_query: filter_query.map(String::from),
created_at: now,
updated_at: now,
})
}
async fn get_playlist(&self, id: Uuid) -> Result<crate::playlists::Playlist> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT id, owner_id, name, description, is_public, is_smart, filter_query, created_at, updated_at FROM playlists WHERE id = $1",
&[&id],
).await?;
let row = rows
.first()
.ok_or_else(|| PinakesError::NotFound(format!("playlist {id}")))?;
Ok(crate::playlists::Playlist {
id: row.get("id"),
owner_id: crate::users::UserId(row.get("owner_id")),
name: row.get("name"),
description: row.get("description"),
is_public: row.get("is_public"),
is_smart: row.get("is_smart"),
filter_query: row.get("filter_query"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
})
}
async fn list_playlists(
&self,
owner_id: Option<crate::users::UserId>,
) -> Result<Vec<crate::playlists::Playlist>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = match owner_id {
Some(uid) => client.query(
"SELECT id, owner_id, name, description, is_public, is_smart, filter_query, created_at, updated_at FROM playlists WHERE owner_id = $1 OR is_public = true ORDER BY updated_at DESC",
&[&uid.0],
).await?,
None => client.query(
"SELECT id, owner_id, name, description, is_public, is_smart, filter_query, created_at, updated_at FROM playlists ORDER BY updated_at DESC",
&[],
).await?,
};
Ok(rows
.iter()
.map(|row| crate::playlists::Playlist {
id: row.get("id"),
owner_id: crate::users::UserId(row.get("owner_id")),
name: row.get("name"),
description: row.get("description"),
is_public: row.get("is_public"),
is_smart: row.get("is_smart"),
filter_query: row.get("filter_query"),
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
})
.collect())
}
    /// Partially update a playlist: only the `Some(..)` fields are written,
    /// `updated_at` is always refreshed, and the post-update row is
    /// re-read and returned.
    ///
    /// NOTE(review): `None` means "leave unchanged", so this method cannot
    /// clear an existing description — confirm that is intended.
    async fn update_playlist(
        &self,
        id: Uuid,
        name: Option<&str>,
        description: Option<&str>,
        is_public: Option<bool>,
    ) -> Result<crate::playlists::Playlist> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let now = Utc::now();
        // Build dynamic update
        // $1 is always updated_at; optional fields take $2..; `idx` tracks
        // the next free placeholder number.
        let mut set_parts = vec!["updated_at = $1".to_string()];
        let mut params: Vec<Box<dyn tokio_postgres::types::ToSql + Sync + Send>> =
            vec![Box::new(now)];
        let mut idx = 2;
        if let Some(n) = name {
            set_parts.push(format!("name = ${idx}"));
            params.push(Box::new(n.to_string()));
            idx += 1;
        }
        if let Some(d) = description {
            set_parts.push(format!("description = ${idx}"));
            params.push(Box::new(d.to_string()));
            idx += 1;
        }
        if let Some(p) = is_public {
            set_parts.push(format!("is_public = ${idx}"));
            params.push(Box::new(p));
            idx += 1;
        }
        // After the optional fields, `idx` is the placeholder used for the
        // WHERE clause, and `id` is pushed as the matching final parameter.
        params.push(Box::new(id));
        let sql = format!(
            "UPDATE playlists SET {} WHERE id = ${idx}",
            set_parts.join(", ")
        );
        // tokio-postgres wants &[&dyn ToSql + Sync], so re-borrow the boxes.
        let param_refs: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = params
            .iter()
            .map(|p| &**p as &(dyn tokio_postgres::types::ToSql + Sync))
            .collect();
        client.execute(&sql, &param_refs).await?;
        self.get_playlist(id).await
    }
async fn delete_playlist(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM playlists WHERE id = $1", &[&id])
.await?;
Ok(())
}
async fn add_to_playlist(
&self,
playlist_id: Uuid,
media_id: MediaId,
position: i32,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let now = Utc::now();
client.execute(
"INSERT INTO playlist_items (playlist_id, media_id, position, added_at) VALUES ($1, $2, $3, $4) ON CONFLICT (playlist_id, media_id) DO UPDATE SET position = $3",
&[&playlist_id, &media_id.0, &position, &now],
).await?;
Ok(())
}
async fn remove_from_playlist(&self, playlist_id: Uuid, media_id: MediaId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM playlist_items WHERE playlist_id = $1 AND media_id = $2",
&[&playlist_id, &media_id.0],
)
.await?;
Ok(())
}
    /// Return a playlist's media items ordered by their stored position.
    async fn get_playlist_items(&self, playlist_id: Uuid) -> Result<Vec<MediaItem>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let rows = client.query(
            "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash, m.created_at, m.updated_at FROM media_items m JOIN playlist_items pi ON m.id = pi.media_id WHERE pi.playlist_id = $1 ORDER BY pi.position ASC",
            &[&playlist_id],
        ).await?;
        let mut items: Vec<MediaItem> = rows
            .iter()
            .map(row_to_media_item)
            .collect::<Result<Vec<_>>>()?;
        // Batch-load custom fields: one query for the whole playlist rather
        // than one per item.
        if !items.is_empty() {
            let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
            let cf_rows = client
                .query(
                    "SELECT media_id, field_name, field_type, field_value
                 FROM custom_fields WHERE media_id = ANY($1)",
                    &[&ids],
                )
                .await?;
            let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
            for row in &cf_rows {
                let mid: Uuid = row.get("media_id");
                let name: String = row.get("field_name");
                let ft_str: String = row.get("field_type");
                let value: String = row.get("field_value");
                let field_type = custom_field_type_from_string(&ft_str)?;
                cf_map
                    .entry(mid)
                    .or_default()
                    .insert(name, CustomField { field_type, value });
            }
            // Items with no custom fields keep whatever row_to_media_item set.
            for item in &mut items {
                if let Some(fields) = cf_map.remove(&item.id.0) {
                    item.custom_fields = fields;
                }
            }
        }
        Ok(items)
    }
async fn reorder_playlist(
&self,
playlist_id: Uuid,
media_id: MediaId,
new_position: i32,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"UPDATE playlist_items SET position = $1 WHERE playlist_id = $2 AND media_id = $3",
&[&new_position, &playlist_id, &media_id.0],
)
.await?;
Ok(())
}
// ===== Analytics =====
async fn record_usage_event(&self, event: &crate::analytics::UsageEvent) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let media_id = event.media_id.map(|m| m.0);
let user_id = event.user_id.map(|u| u.0);
let event_type = event.event_type.to_string();
let context: Option<serde_json::Value> = event
.context_json
.as_ref()
.and_then(|s| serde_json::from_str(s).ok());
client.execute(
"INSERT INTO usage_events (id, media_id, user_id, event_type, timestamp, duration_secs, context_json) VALUES ($1, $2, $3, $4, $5, $6, $7)",
&[&event.id, &media_id, &user_id, &event_type, &event.timestamp, &event.duration_secs, &context],
).await?;
Ok(())
}
    /// Fetch usage events, optionally filtered by media and/or user,
    /// newest first, capped at `limit`.
    async fn get_usage_events(
        &self,
        media_id: Option<MediaId>,
        user_id: Option<crate::users::UserId>,
        limit: u64,
    ) -> Result<Vec<crate::analytics::UsageEvent>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // Build WHERE clauses dynamically; `idx` tracks the next free
        // placeholder number.
        let mut conditions = Vec::new();
        let mut params: Vec<Box<dyn tokio_postgres::types::ToSql + Sync + Send>> = Vec::new();
        let mut idx = 1;
        if let Some(mid) = media_id {
            conditions.push(format!("media_id = ${idx}"));
            params.push(Box::new(mid.0));
            idx += 1;
        }
        if let Some(uid) = user_id {
            conditions.push(format!("user_id = ${idx}"));
            params.push(Box::new(uid.0));
            idx += 1;
        }
        let where_clause = if conditions.is_empty() {
            String::new()
        } else {
            format!("WHERE {}", conditions.join(" AND "))
        };
        // After the optional filters, `idx` is the LIMIT placeholder and the
        // limit is pushed as the matching final parameter.
        params.push(Box::new(limit as i64));
        let sql = format!(
            "SELECT id, media_id, user_id, event_type, timestamp, duration_secs, context_json FROM usage_events {} ORDER BY timestamp DESC LIMIT ${idx}",
            where_clause
        );
        let param_refs: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = params
            .iter()
            .map(|p| &**p as &(dyn tokio_postgres::types::ToSql + Sync))
            .collect();
        let rows = client.query(&sql, &param_refs).await?;
        Ok(rows
            .iter()
            .map(|row| {
                let event_type_str: String = row.get("event_type");
                let context_json: Option<serde_json::Value> = row.get("context_json");
                crate::analytics::UsageEvent {
                    id: row.get("id"),
                    media_id: row.get::<_, Option<Uuid>>("media_id").map(MediaId),
                    user_id: row
                        .get::<_, Option<Uuid>>("user_id")
                        .map(crate::users::UserId),
                    // Unknown event-type strings silently fall back to View.
                    event_type: event_type_str
                        .parse()
                        .unwrap_or(crate::analytics::UsageEventType::View),
                    timestamp: row.get("timestamp"),
                    duration_secs: row.get("duration_secs"),
                    context_json: context_json.map(|v| v.to_string()),
                }
            })
            .collect())
    }
async fn get_most_viewed(&self, limit: u64) -> Result<Vec<(MediaItem, u64)>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash, m.created_at, m.updated_at, COUNT(ue.id) as view_count FROM media_items m JOIN usage_events ue ON m.id = ue.media_id WHERE ue.event_type IN ('view', 'play') GROUP BY m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash, m.created_at, m.updated_at ORDER BY view_count DESC LIMIT $1",
&[&(limit as i64)],
).await?;
let mut results = Vec::new();
for row in &rows {
let item = row_to_media_item(row)?;
let count: i64 = row.get(24);
results.push((item, count as u64));
}
// Batch-load custom fields
if !results.is_empty() {
let ids: Vec<Uuid> = results.iter().map(|(i, _)| i.id.0).collect();
let cf_rows = client
.query(
"SELECT media_id, field_name, field_type, field_value
FROM custom_fields WHERE media_id = ANY($1)",
&[&ids],
)
.await?;
let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
for row in &cf_rows {
let mid: Uuid = row.get("media_id");
let name: String = row.get("field_name");
let ft_str: String = row.get("field_type");
let value: String = row.get("field_value");
let field_type = custom_field_type_from_string(&ft_str)?;
cf_map
.entry(mid)
.or_default()
.insert(name, CustomField { field_type, value });
}
for (item, _) in &mut results {
if let Some(fields) = cf_map.remove(&item.id.0) {
item.custom_fields = fields;
}
}
}
Ok(results)
}
    /// Return the media items a user most recently viewed or played,
    /// ordered by the latest matching event per item.
    async fn get_recently_viewed(
        &self,
        user_id: crate::users::UserId,
        limit: u64,
    ) -> Result<Vec<MediaItem>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // GROUP BY collapses repeated events per item; MAX(timestamp) orders
        // by the most recent of them.
        let rows = client.query(
            "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash, m.created_at, m.updated_at FROM media_items m JOIN usage_events ue ON m.id = ue.media_id WHERE ue.user_id = $1 AND ue.event_type IN ('view', 'play') GROUP BY m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, m.perceptual_hash, m.created_at, m.updated_at ORDER BY MAX(ue.timestamp) DESC LIMIT $2",
            &[&user_id.0, &(limit as i64)],
        ).await?;
        let mut items: Vec<MediaItem> = rows
            .iter()
            .map(row_to_media_item)
            .collect::<Result<Vec<_>>>()?;
        // Batch-load custom fields: one query for the whole result set
        // rather than one per item.
        if !items.is_empty() {
            let ids: Vec<Uuid> = items.iter().map(|i| i.id.0).collect();
            let cf_rows = client
                .query(
                    "SELECT media_id, field_name, field_type, field_value
                 FROM custom_fields WHERE media_id = ANY($1)",
                    &[&ids],
                )
                .await?;
            let mut cf_map: HashMap<Uuid, HashMap<String, CustomField>> = HashMap::new();
            for row in &cf_rows {
                let mid: Uuid = row.get("media_id");
                let name: String = row.get("field_name");
                let ft_str: String = row.get("field_type");
                let value: String = row.get("field_value");
                let field_type = custom_field_type_from_string(&ft_str)?;
                cf_map
                    .entry(mid)
                    .or_default()
                    .insert(name, CustomField { field_type, value });
            }
            for item in &mut items {
                if let Some(fields) = cf_map.remove(&item.id.0) {
                    item.custom_fields = fields;
                }
            }
        }
        Ok(items)
    }
async fn update_watch_progress(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
progress_secs: f64,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let id = Uuid::now_v7();
let now = Utc::now();
client.execute(
"INSERT INTO watch_history (id, user_id, media_id, progress_secs, last_watched) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (user_id, media_id) DO UPDATE SET progress_secs = $4, last_watched = $5",
&[&id, &user_id.0, &media_id.0, &progress_secs, &now],
).await?;
Ok(())
}
async fn get_watch_progress(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<Option<f64>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT progress_secs FROM watch_history WHERE user_id = $1 AND media_id = $2",
&[&user_id.0, &media_id.0],
)
.await?;
Ok(rows.first().map(|row| row.get("progress_secs")))
}
async fn cleanup_old_events(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let affected = client
.execute("DELETE FROM usage_events WHERE timestamp < $1", &[&before])
.await?;
Ok(affected)
}
// ===== Subtitles =====
async fn add_subtitle(&self, subtitle: &crate::subtitles::Subtitle) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let format_str = subtitle.format.to_string();
let file_path = subtitle
.file_path
.as_ref()
.map(|p| p.to_string_lossy().to_string());
let track_index = subtitle.track_index.map(|i| i as i32);
let offset_ms = subtitle.offset_ms as i32;
client.execute(
"INSERT INTO subtitles (id, media_id, language, format, file_path, is_embedded, track_index, offset_ms, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
&[&subtitle.id, &subtitle.media_id.0, &subtitle.language, &format_str, &file_path, &subtitle.is_embedded, &track_index, &offset_ms, &subtitle.created_at],
).await?;
Ok(())
}
    /// List every subtitle track stored for a media item.
    async fn get_media_subtitles(
        &self,
        media_id: MediaId,
    ) -> Result<Vec<crate::subtitles::Subtitle>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let rows = client.query(
            "SELECT id, media_id, language, format, file_path, is_embedded, track_index, offset_ms, created_at FROM subtitles WHERE media_id = $1",
            &[&media_id.0],
        ).await?;
        Ok(rows
            .iter()
            .map(|row| {
                let format_str: String = row.get("format");
                crate::subtitles::Subtitle {
                    id: row.get("id"),
                    media_id: MediaId(row.get("media_id")),
                    language: row.get("language"),
                    // Unknown format strings silently fall back to SRT.
                    format: format_str
                        .parse()
                        .unwrap_or(crate::subtitles::SubtitleFormat::Srt),
                    file_path: row
                        .get::<_, Option<String>>("file_path")
                        .map(std::path::PathBuf::from),
                    is_embedded: row.get("is_embedded"),
                    // Stored as i32 columns; widened back to the in-memory
                    // usize/i64 representations here.
                    track_index: row.get::<_, Option<i32>>("track_index").map(|i| i as usize),
                    offset_ms: row.get::<_, i32>("offset_ms") as i64,
                    created_at: row.get("created_at"),
                }
            })
            .collect())
    }
async fn delete_subtitle(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM subtitles WHERE id = $1", &[&id])
.await?;
Ok(())
}
async fn update_subtitle_offset(&self, id: Uuid, offset_ms: i64) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let offset = offset_ms as i32;
client
.execute(
"UPDATE subtitles SET offset_ms = $1 WHERE id = $2",
&[&offset, &id],
)
.await?;
Ok(())
}
    // ===== External Metadata (Enrichment) =====
    /// Upsert a row of third-party metadata for a media item. The JSON
    /// payload is stored in a JSONB column; an unparseable payload is
    /// replaced with an empty object and logged.
    async fn store_external_metadata(
        &self,
        meta: &crate::enrichment::ExternalMetadata,
    ) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let source = meta.source.to_string();
        let metadata_json: serde_json::Value = serde_json::from_str(&meta.metadata_json)
            .unwrap_or_else(|e| {
                // Best-effort: keep the row even if the payload is malformed.
                tracing::warn!(
                    "failed to deserialize metadata_json for external metadata {}: {}",
                    meta.id,
                    e
                );
                serde_json::Value::Object(Default::default())
            });
        client.execute(
            "INSERT INTO external_metadata (id, media_id, source, external_id, metadata_json, confidence, last_updated) VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (id) DO UPDATE SET metadata_json = $5, confidence = $6, last_updated = $7",
            &[&meta.id, &meta.media_id.0, &source, &meta.external_id, &metadata_json, &meta.confidence, &meta.last_updated],
        ).await?;
        Ok(())
    }
    /// Load every external-metadata record attached to a media item.
    async fn get_external_metadata(
        &self,
        media_id: MediaId,
    ) -> Result<Vec<crate::enrichment::ExternalMetadata>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let rows = client.query(
            "SELECT id, media_id, source, external_id, metadata_json, confidence, last_updated FROM external_metadata WHERE media_id = $1",
            &[&media_id.0],
        ).await?;
        Ok(rows
            .iter()
            .map(|row| {
                let source_str: String = row.get("source");
                let metadata_json: serde_json::Value = row.get("metadata_json");
                crate::enrichment::ExternalMetadata {
                    id: row.get("id"),
                    media_id: MediaId(row.get("media_id")),
                    // Unknown source strings silently fall back to MusicBrainz.
                    source: source_str
                        .parse()
                        .unwrap_or(crate::enrichment::EnrichmentSourceType::MusicBrainz),
                    external_id: row.get("external_id"),
                    // Stored as JSONB; re-serialized to a String to match the
                    // in-memory model.
                    metadata_json: metadata_json.to_string(),
                    confidence: row.get("confidence"),
                    last_updated: row.get("last_updated"),
                }
            })
            .collect())
    }
async fn delete_external_metadata(&self, id: Uuid) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute("DELETE FROM external_metadata WHERE id = $1", &[&id])
.await?;
Ok(())
}
// ===== Transcode Sessions =====
async fn create_transcode_session(
&self,
session: &crate::transcode::TranscodeSession,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let user_id = session.user_id.map(|u| u.0);
let cache_path = session.cache_path.to_string_lossy().to_string();
let status = session.status.as_str().to_string();
let error_message = session.status.error_message().map(String::from);
let progress = session.progress as f64;
client.execute(
"INSERT INTO transcode_sessions (id, media_id, user_id, profile, cache_path, status, progress, error_message, created_at, expires_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
&[&session.id, &session.media_id.0, &user_id, &session.profile, &cache_path, &status, &progress, &error_message, &session.created_at, &session.expires_at],
).await?;
Ok(())
}
async fn get_transcode_session(&self, id: Uuid) -> Result<crate::transcode::TranscodeSession> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client.query(
"SELECT id, media_id, user_id, profile, cache_path, status, progress, error_message, created_at, expires_at FROM transcode_sessions WHERE id = $1",
&[&id],
).await?;
let row = rows
.first()
.ok_or_else(|| PinakesError::NotFound(format!("transcode session {id}")))?;
let status_str: String = row.get("status");
let error_msg: Option<String> = row.get("error_message");
let progress: f64 = row.get("progress");
Ok(crate::transcode::TranscodeSession {
id: row.get("id"),
media_id: MediaId(row.get("media_id")),
user_id: row
.get::<_, Option<Uuid>>("user_id")
.map(crate::users::UserId),
profile: row.get("profile"),
cache_path: std::path::PathBuf::from(row.get::<_, String>("cache_path")),
status: crate::transcode::TranscodeStatus::from_db(&status_str, error_msg.as_deref()),
progress: progress as f32,
created_at: row.get("created_at"),
expires_at: row.get("expires_at"),
duration_secs: None,
child_cancel: None,
})
}
    /// List transcode sessions, newest first, optionally restricted to a
    /// single media item. Runtime-only fields (`duration_secs`,
    /// `child_cancel`) are not persisted and come back as `None`.
    async fn list_transcode_sessions(
        &self,
        media_id: Option<MediaId>,
    ) -> Result<Vec<crate::transcode::TranscodeSession>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let rows = match media_id {
            Some(mid) => client.query(
                "SELECT id, media_id, user_id, profile, cache_path, status, progress, error_message, created_at, expires_at FROM transcode_sessions WHERE media_id = $1 ORDER BY created_at DESC",
                &[&mid.0],
            ).await?,
            None => client.query(
                "SELECT id, media_id, user_id, profile, cache_path, status, progress, error_message, created_at, expires_at FROM transcode_sessions ORDER BY created_at DESC",
                &[],
            ).await?,
        };
        Ok(rows
            .iter()
            .map(|row| {
                let status_str: String = row.get("status");
                let error_msg: Option<String> = row.get("error_message");
                let progress: f64 = row.get("progress");
                crate::transcode::TranscodeSession {
                    id: row.get("id"),
                    media_id: MediaId(row.get("media_id")),
                    user_id: row
                        .get::<_, Option<Uuid>>("user_id")
                        .map(crate::users::UserId),
                    profile: row.get("profile"),
                    cache_path: std::path::PathBuf::from(row.get::<_, String>("cache_path")),
                    // Status and its optional error message are stored in two
                    // columns and recombined here.
                    status: crate::transcode::TranscodeStatus::from_db(
                        &status_str,
                        error_msg.as_deref(),
                    ),
                    progress: progress as f32,
                    created_at: row.get("created_at"),
                    expires_at: row.get("expires_at"),
                    duration_secs: None,
                    child_cancel: None,
                }
            })
            .collect())
    }
async fn update_transcode_status(
&self,
id: Uuid,
status: crate::transcode::TranscodeStatus,
progress: f32,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let status_str = status.as_str().to_string();
let error_message = status.error_message().map(String::from);
let progress_f64 = progress as f64;
client.execute(
"UPDATE transcode_sessions SET status = $1, progress = $2, error_message = $3 WHERE id = $4",
&[&status_str, &progress_f64, &error_message, &id],
).await?;
Ok(())
}
async fn cleanup_expired_transcodes(
&self,
before: chrono::DateTime<chrono::Utc>,
) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let affected = client
.execute(
"DELETE FROM transcode_sessions WHERE expires_at IS NOT NULL AND expires_at < $1",
&[&before],
)
.await?;
Ok(affected)
}
// ===== Session Management =====
async fn create_session(&self, session: &crate::storage::SessionData) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"INSERT INTO sessions (session_token, user_id, username, role, created_at, expires_at, last_accessed)
VALUES ($1, $2, $3, $4, $5, $6, $7)",
&[
&session.session_token,
&session.user_id,
&session.username,
&session.role,
&session.created_at,
&session.expires_at,
&session.last_accessed,
],
)
.await?;
Ok(())
}
async fn get_session(
&self,
session_token: &str,
) -> Result<Option<crate::storage::SessionData>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let row = client
.query_opt(
"SELECT session_token, user_id, username, role, created_at, expires_at, last_accessed
FROM sessions WHERE session_token = $1",
&[&session_token],
)
.await?;
Ok(row.map(|r| crate::storage::SessionData {
session_token: r.get(0),
user_id: r.get(1),
username: r.get(2),
role: r.get(3),
created_at: r.get(4),
expires_at: r.get(5),
last_accessed: r.get(6),
}))
}
async fn touch_session(&self, session_token: &str) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let now = chrono::Utc::now();
client
.execute(
"UPDATE sessions SET last_accessed = $1 WHERE session_token = $2",
&[&now, &session_token],
)
.await?;
Ok(())
}
async fn delete_session(&self, session_token: &str) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"DELETE FROM sessions WHERE session_token = $1",
&[&session_token],
)
.await?;
Ok(())
}
async fn delete_user_sessions(&self, username: &str) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let affected = client
.execute("DELETE FROM sessions WHERE username = $1", &[&username])
.await?;
Ok(affected)
}
async fn delete_expired_sessions(&self) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let now = chrono::Utc::now();
let affected = client
.execute("DELETE FROM sessions WHERE expires_at < $1", &[&now])
.await?;
Ok(affected)
}
    /// List all unexpired sessions, most recently accessed first,
    /// optionally restricted to a single username.
    async fn list_active_sessions(
        &self,
        username: Option<&str>,
    ) -> Result<Vec<crate::storage::SessionData>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // "Active" means expires_at is still in the future at query time.
        let now = chrono::Utc::now();
        let rows = if let Some(user) = username {
            client
                .query(
                    "SELECT session_token, user_id, username, role, created_at, expires_at, last_accessed
                 FROM sessions WHERE expires_at > $1 AND username = $2
                 ORDER BY last_accessed DESC",
                    &[&now, &user],
                )
                .await?
        } else {
            client
                .query(
                    "SELECT session_token, user_id, username, role, created_at, expires_at, last_accessed
                 FROM sessions WHERE expires_at > $1
                 ORDER BY last_accessed DESC",
                    &[&now],
                )
                .await?
        };
        // Positional gets follow the SELECT column order above.
        Ok(rows
            .into_iter()
            .map(|r| crate::storage::SessionData {
                session_token: r.get(0),
                user_id: r.get(1),
                username: r.get(2),
                role: r.get(3),
                created_at: r.get(4),
                expires_at: r.get(5),
                last_accessed: r.get(6),
            })
            .collect())
    }
    // Book Management Methods
    /// Upsert a book's full metadata in one transaction: the base
    /// book_metadata row is inserted-or-updated, then the authors and
    /// identifiers are replaced wholesale (delete + reinsert) so the stored
    /// sets exactly mirror `metadata`.
    async fn upsert_book_metadata(&self, metadata: &crate::model::BookMetadata) -> Result<()> {
        let mut client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // All statements share one transaction; a failure anywhere rolls
        // back the whole upsert when `tx` is dropped uncommitted.
        let tx = client.transaction().await?;
        // Upsert book_metadata
        tx.execute(
            "INSERT INTO book_metadata (
                media_id, isbn, isbn13, publisher, language, page_count,
                publication_date, series_name, series_index, format
            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
            ON CONFLICT(media_id) DO UPDATE SET
                isbn = $2, isbn13 = $3, publisher = $4, language = $5,
                page_count = $6, publication_date = $7, series_name = $8,
                series_index = $9, format = $10, updated_at = NOW()",
            &[
                &metadata.media_id.0,
                &metadata.isbn,
                &metadata.isbn13,
                &metadata.publisher,
                &metadata.language,
                &metadata.page_count,
                &metadata.publication_date,
                &metadata.series_name,
                &metadata.series_index,
                &metadata.format,
            ],
        )
        .await?;
        // Clear existing authors and identifiers
        tx.execute(
            "DELETE FROM book_authors WHERE media_id = $1",
            &[&metadata.media_id.0],
        )
        .await?;
        tx.execute(
            "DELETE FROM book_identifiers WHERE media_id = $1",
            &[&metadata.media_id.0],
        )
        .await?;
        // Insert authors
        for author in &metadata.authors {
            tx.execute(
                "INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
                 VALUES ($1, $2, $3, $4, $5)",
                &[
                    &metadata.media_id.0,
                    &author.name,
                    &author.file_as,
                    &author.role,
                    &author.position,
                ],
            )
            .await?;
        }
        // Insert identifiers
        // Each identifier type (e.g. several values under one key) becomes
        // one row per value.
        for (id_type, values) in &metadata.identifiers {
            for value in values {
                tx.execute(
                    "INSERT INTO book_identifiers (media_id, identifier_type, identifier_value)
                     VALUES ($1, $2, $3)",
                    &[&metadata.media_id.0, &id_type, &value],
                )
                .await?;
            }
        }
        tx.commit().await?;
        Ok(())
    }
    /// Fetch the complete book metadata for a media item, including its
    /// ordered author list and external identifiers.
    ///
    /// Returns `Ok(None)` when no `book_metadata` row exists for `media_id`.
    /// Issues three queries: the base metadata row, the authors (ordered by
    /// `position`), and the identifiers (grouped by identifier type).
    async fn get_book_metadata(
        &self,
        media_id: MediaId,
    ) -> Result<Option<crate::model::BookMetadata>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // Get base book metadata
        let row = client
            .query_opt(
                "SELECT isbn, isbn13, publisher, language, page_count,
                        publication_date, series_name, series_index, format,
                        created_at, updated_at
                 FROM book_metadata WHERE media_id = $1",
                &[&media_id.0],
            )
            .await?;
        // No base row means the item has no book metadata at all; skip the
        // author/identifier queries entirely.
        if row.is_none() {
            return Ok(None);
        }
        let row = row.unwrap();
        // Get authors
        let author_rows = client
            .query(
                "SELECT author_name, author_sort, role, position
                 FROM book_authors WHERE media_id = $1 ORDER BY position",
                &[&media_id.0],
            )
            .await?;
        let authors: Vec<crate::model::AuthorInfo> = author_rows
            .iter()
            .map(|r| crate::model::AuthorInfo {
                name: r.get(0),
                file_as: r.get(1),
                role: r.get(2),
                position: r.get(3),
            })
            .collect();
        // Get identifiers
        let id_rows = client
            .query(
                "SELECT identifier_type, identifier_value
                 FROM book_identifiers WHERE media_id = $1",
                &[&media_id.0],
            )
            .await?;
        // Group identifier values under their type; a type may carry several
        // values (one row each in book_identifiers).
        let mut identifiers: std::collections::HashMap<String, Vec<String>> =
            std::collections::HashMap::new();
        for r in id_rows {
            let id_type: String = r.get(0);
            let value: String = r.get(1);
            identifiers.entry(id_type).or_default().push(value);
        }
        // Column indices follow the SELECT list of the base query above.
        Ok(Some(crate::model::BookMetadata {
            media_id,
            isbn: row.get(0),
            isbn13: row.get(1),
            publisher: row.get(2),
            language: row.get(3),
            page_count: row.get(4),
            publication_date: row.get(5),
            series_name: row.get(6),
            series_index: row.get(7),
            format: row.get(8),
            authors,
            identifiers,
            created_at: row.get(9),
            updated_at: row.get(10),
        }))
    }
async fn add_book_author(
&self,
media_id: MediaId,
author: &crate::model::AuthorInfo,
) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
client
.execute(
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT(media_id, author_name, role) DO UPDATE SET
author_sort = $3, position = $5",
&[
&media_id.0,
&author.name,
&author.file_as,
&author.role,
&author.position,
],
)
.await?;
Ok(())
}
async fn get_book_authors(&self, media_id: MediaId) -> Result<Vec<crate::model::AuthorInfo>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT author_name, author_sort, role, position
FROM book_authors WHERE media_id = $1 ORDER BY position",
&[&media_id.0],
)
.await?;
Ok(rows
.iter()
.map(|r| crate::model::AuthorInfo {
name: r.get(0),
file_as: r.get(1),
role: r.get(2),
position: r.get(3),
})
.collect())
}
async fn list_all_authors(&self, pagination: &Pagination) -> Result<Vec<(String, u64)>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT author_name, COUNT(DISTINCT media_id) as book_count
FROM book_authors
GROUP BY author_name
ORDER BY book_count DESC, author_name
LIMIT $1 OFFSET $2",
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?;
Ok(rows
.iter()
.map(|r| (r.get(0), r.get::<_, i64>(1) as u64))
.collect())
}
async fn list_series(&self) -> Result<Vec<(String, u64)>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT series_name, COUNT(*) as book_count
FROM book_metadata
WHERE series_name IS NOT NULL
GROUP BY series_name
ORDER BY series_name",
&[],
)
.await?;
Ok(rows
.iter()
.map(|r| (r.get(0), r.get::<_, i64>(1) as u64))
.collect())
}
async fn get_series_books(&self, series_name: &str) -> Result<Vec<MediaItem>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata b ON m.id = b.media_id
WHERE b.series_name = $1
ORDER BY b.series_index, m.title",
&[&series_name],
)
.await?;
rows.iter().map(row_to_media_item).collect()
}
    /// Store a user's current page for a book.
    ///
    /// Note: this reuses the `watch_history` table — the page number is
    /// written into the `progress_secs` column as an f64 and
    /// `last_watched_at` is bumped to NOW(). An existing (user, media) row is
    /// updated in place via ON CONFLICT.
    async fn update_reading_progress(
        &self,
        user_id: uuid::Uuid,
        media_id: MediaId,
        current_page: i32,
    ) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        client
            .execute(
                "INSERT INTO watch_history (user_id, media_id, progress_secs, last_watched_at)
                 VALUES ($1, $2, $3, NOW())
                 ON CONFLICT(user_id, media_id) DO UPDATE SET
                     progress_secs = $3, last_watched_at = NOW()",
                &[&user_id, &media_id.0, &(current_page as f64)],
            )
            .await?;
        Ok(())
    }
    /// Load a user's reading progress for a single book.
    ///
    /// Progress is read from `watch_history` (where `progress_secs` holds the
    /// current page as an f64 — see `update_reading_progress`); total pages
    /// come from `book_metadata.page_count` when present. Returns `None` when
    /// the user has no history row for this item.
    async fn get_reading_progress(
        &self,
        user_id: uuid::Uuid,
        media_id: MediaId,
    ) -> Result<Option<crate::model::ReadingProgress>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let row = client
            .query_opt(
                "SELECT wh.progress_secs, bm.page_count, wh.last_watched_at
                 FROM watch_history wh
                 LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id
                 WHERE wh.user_id = $1 AND wh.media_id = $2",
                &[&user_id, &media_id.0],
            )
            .await?;
        Ok(row.map(|r| {
            let current_page = r.get::<_, f64>(0) as i32;
            let total_pages: Option<i32> = r.get(1);
            // Percentage is capped at 100; an unknown or non-positive total
            // yields 0.0 rather than dividing by zero.
            let progress_percent = if let Some(total) = total_pages {
                if total > 0 {
                    (current_page as f64 / total as f64 * 100.0).min(100.0)
                } else {
                    0.0
                }
            } else {
                0.0
            };
            crate::model::ReadingProgress {
                media_id,
                user_id,
                current_page,
                total_pages,
                progress_percent,
                last_read_at: r.get(2),
            }
        }))
    }
    /// Return the media items a user has reading history for, optionally
    /// filtered by reading status.
    ///
    /// The status is not stored: it is derived per row from progress vs. page
    /// count (>=100% → Completed, >0% → Reading, 0% → ToRead; unknown page
    /// count → Reading), then filtered in application code. Rows are ordered
    /// by most recently read first.
    async fn get_reading_list(
        &self,
        user_id: uuid::Uuid,
        status: Option<crate::model::ReadingStatus>,
    ) -> Result<Vec<MediaItem>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        // Query books with reading progress for this user
        let rows = client
            .query(
                "SELECT m.*, wh.progress_secs, bm.page_count
                 FROM media_items m
                 INNER JOIN watch_history wh ON m.id = wh.media_id
                 LEFT JOIN book_metadata bm ON m.id = bm.media_id
                 WHERE wh.user_id = $1
                 ORDER BY wh.last_watched_at DESC",
                &[&user_id],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut results = Vec::new();
        for row in rows {
            // Parse the media item
            let item = row_to_media_item(&row)?;
            // Get progress info (progress_secs stores the current page as f64;
            // see update_reading_progress)
            let current_page: f64 = row.get("progress_secs");
            let current_page = current_page as i32;
            let total_pages: Option<i32> = row.get("page_count");
            // Calculate status based on progress
            let calculated_status = if let Some(total) = total_pages {
                if total > 0 {
                    let percent = (current_page as f64 / total as f64 * 100.0).min(100.0);
                    if percent >= 100.0 {
                        crate::model::ReadingStatus::Completed
                    } else if percent > 0.0 {
                        crate::model::ReadingStatus::Reading
                    } else {
                        crate::model::ReadingStatus::ToRead
                    }
                } else {
                    crate::model::ReadingStatus::Reading
                }
            } else {
                // No total pages known, assume reading
                crate::model::ReadingStatus::Reading
            };
            // Filter by status if specified
            match status {
                None => results.push(item),
                Some(s) if s == calculated_status => results.push(item),
                _ => {}
            }
        }
        Ok(results)
    }
#[allow(clippy::too_many_arguments)]
async fn search_books(
&self,
isbn: Option<&str>,
author: Option<&str>,
series: Option<&str>,
publisher: Option<&str>,
language: Option<&str>,
pagination: &Pagination,
) -> Result<Vec<MediaItem>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
// For PostgreSQL, we need to handle parameters carefully due to lifetimes
// Simplified approach: use separate queries for different filter combinations
let rows = if let (Some(i), Some(a), Some(s), Some(p), Some(l)) =
(isbn, author, series, publisher, language)
{
let author_pattern = format!("%{}%", a);
let series_pattern = format!("%{}%", s);
let publisher_pattern = format!("%{}%", p);
client
.query(
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id
INNER JOIN book_authors ba ON m.id = ba.media_id
WHERE (bm.isbn = $1 OR bm.isbn13 = $1) AND ba.author_name ILIKE $2
AND bm.series_name ILIKE $3 AND bm.publisher ILIKE $4 AND bm.language = $5
ORDER BY m.title LIMIT $6 OFFSET $7",
&[
&i,
&author_pattern,
&series_pattern,
&publisher_pattern,
&l,
&(pagination.limit as i64),
&(pagination.offset as i64),
],
)
.await?
} else if isbn.is_none()
&& author.is_none()
&& series.is_none()
&& publisher.is_none()
&& language.is_none()
{
// No filters
client
.query(
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id
ORDER BY m.title LIMIT $1 OFFSET $2",
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
} else {
// For other combinations, use dynamic query (simplified - just filter by what's provided)
let mut query =
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id WHERE 1=1"
.to_string();
if isbn.is_some() {
query.push_str(" AND (bm.isbn = $1 OR bm.isbn13 = $1)");
}
query.push_str(" ORDER BY m.title LIMIT $2 OFFSET $3");
if let Some(i) = isbn {
client
.query(
&query,
&[&i, &(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
} else {
client
.query(
&query,
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
}
};
let items: Result<Vec<_>> = rows.iter().map(row_to_media_item).collect();
items
}
// =========================================================================
// Managed Storage
// =========================================================================
    /// Insert a media item that lives in managed (server-owned) storage.
    ///
    /// Unlike library-scanned items, managed rows also carry the storage
    /// mode, the original upload filename, the upload timestamp, and the
    /// storage key. The parameter list is positional and must stay aligned
    /// with the 20-column list in the INSERT statement.
    async fn insert_managed_media(&self, item: &MediaItem) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        client
            .execute(
                "INSERT INTO media_items (id, path, file_name, media_type, content_hash, file_size,
                title, artist, album, genre, year, duration_secs, description, thumbnail_path,
                storage_mode, original_filename, uploaded_at, storage_key, created_at, updated_at)
                VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)",
                &[
                    &item.id.0,
                    &item.path.to_string_lossy().to_string(),
                    &item.file_name,
                    &media_type_to_string(&item.media_type),
                    &item.content_hash.0,
                    &(item.file_size as i64),
                    &item.title,
                    &item.artist,
                    &item.album,
                    &item.genre,
                    &item.year,
                    &item.duration_secs,
                    &item.description,
                    // Optional thumbnail path is stored as text; None maps to NULL.
                    &item.thumbnail_path.as_ref().map(|p| p.to_string_lossy().to_string()),
                    &item.storage_mode.to_string(),
                    &item.original_filename,
                    &item.uploaded_at,
                    &item.storage_key,
                    &item.created_at,
                    &item.updated_at,
                ],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(())
    }
async fn get_or_create_blob(
&self,
hash: &ContentHash,
size: u64,
mime_type: &str,
) -> Result<ManagedBlob> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
// Try to get existing blob
let existing = client
.query_opt(
"SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified
FROM managed_blobs WHERE content_hash = $1",
&[&hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
if let Some(row) = existing {
return Ok(ManagedBlob {
content_hash: ContentHash(row.get(0)),
file_size: row.get::<_, i64>(1) as u64,
mime_type: row.get(2),
reference_count: row.get::<_, i32>(3) as u32,
stored_at: row.get(4),
last_verified: row.get(5),
});
}
// Create new blob
let now = chrono::Utc::now();
client
.execute(
"INSERT INTO managed_blobs (content_hash, file_size, mime_type, reference_count, stored_at)
VALUES ($1, $2, $3, 1, $4)",
&[&hash.0, &(size as i64), &mime_type, &now],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(ManagedBlob {
content_hash: hash.clone(),
file_size: size,
mime_type: mime_type.to_string(),
reference_count: 1,
stored_at: now,
last_verified: None,
})
}
async fn get_blob(&self, hash: &ContentHash) -> Result<Option<ManagedBlob>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let row = client
.query_opt(
"SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified
FROM managed_blobs WHERE content_hash = $1",
&[&hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(row.map(|r| ManagedBlob {
content_hash: ContentHash(r.get(0)),
file_size: r.get::<_, i64>(1) as u64,
mime_type: r.get(2),
reference_count: r.get::<_, i32>(3) as u32,
stored_at: r.get(4),
last_verified: r.get(5),
}))
}
async fn increment_blob_ref(&self, hash: &ContentHash) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
client
.execute(
"UPDATE managed_blobs SET reference_count = reference_count + 1 WHERE content_hash = $1",
&[&hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result<bool> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
client
.execute(
"UPDATE managed_blobs SET reference_count = reference_count - 1 WHERE content_hash = $1",
&[&hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
// Check if reference count is now 0
let row = client
.query_opt(
"SELECT reference_count FROM managed_blobs WHERE content_hash = $1",
&[&hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
let count: i32 = row.map(|r| r.get(0)).unwrap_or(0);
Ok(count <= 0)
}
async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let now = chrono::Utc::now();
client
.execute(
"UPDATE managed_blobs SET last_verified = $1 WHERE content_hash = $2",
&[&now, &hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
async fn list_orphaned_blobs(&self) -> Result<Vec<ManagedBlob>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let rows = client
.query(
"SELECT content_hash, file_size, mime_type, reference_count, stored_at, last_verified
FROM managed_blobs WHERE reference_count <= 0",
&[],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(rows
.iter()
.map(|r| ManagedBlob {
content_hash: ContentHash(r.get(0)),
file_size: r.get::<_, i64>(1) as u64,
mime_type: r.get(2),
reference_count: r.get::<_, i32>(3) as u32,
stored_at: r.get(4),
last_verified: r.get(5),
})
.collect())
}
async fn delete_blob(&self, hash: &ContentHash) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
client
.execute(
"DELETE FROM managed_blobs WHERE content_hash = $1",
&[&hash.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
async fn managed_storage_stats(&self) -> Result<ManagedStorageStats> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let total_blobs: i64 = client
.query_one("SELECT COUNT(*) FROM managed_blobs", &[])
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
.get(0);
let total_size: i64 = client
.query_one("SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs", &[])
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
.get(0);
let unique_size: i64 = client
.query_one(
"SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE reference_count = 1",
&[],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
.get(0);
let managed_media_count: i64 = client
.query_one(
"SELECT COUNT(*) FROM media_items WHERE storage_mode = 'managed'",
&[],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
.get(0);
let orphaned_blobs: i64 = client
.query_one(
"SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0",
&[],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
.get(0);
let dedup_ratio = if total_size > 0 {
unique_size as f64 / total_size as f64
} else {
1.0
};
Ok(ManagedStorageStats {
total_blobs: total_blobs as u64,
total_size_bytes: total_size as u64,
unique_size_bytes: unique_size as u64,
deduplication_ratio: dedup_ratio,
managed_media_count: managed_media_count as u64,
orphaned_blobs: orphaned_blobs as u64,
})
}
// =========================================================================
// Sync Devices
// =========================================================================
    /// Persist a newly registered sync device.
    ///
    /// Only a hash of the device token is stored (`token_hash`), never the
    /// token itself. Returns a clone of the input `device` on success.
    async fn register_device(
        &self,
        device: &crate::sync::SyncDevice,
        token_hash: &str,
    ) -> Result<crate::sync::SyncDevice> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        client
            .execute(
                "INSERT INTO sync_devices (id, user_id, name, device_type, client_version, os_info,
                device_token_hash, last_seen_at, sync_cursor, enabled, created_at, updated_at)
                VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)",
                &[
                    &device.id.0,
                    &device.user_id.0,
                    &device.name,
                    // Enum stored as its string form; parsed back on read.
                    &device.device_type.to_string(),
                    &device.client_version,
                    &device.os_info,
                    &token_hash,
                    &device.last_seen_at,
                    &device.sync_cursor,
                    &device.enabled,
                    &device.created_at,
                    &device.updated_at,
                ],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(device.clone())
    }
    /// Load a sync device by id.
    ///
    /// Uses `query_one`, so a missing device surfaces as a database error
    /// rather than an `Option`. An unparseable stored `device_type` falls
    /// back to that enum's `Default`.
    async fn get_device(&self, id: crate::sync::DeviceId) -> Result<crate::sync::SyncDevice> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let row = client
            .query_one(
                "SELECT id, user_id, name, device_type, client_version, os_info,
                 last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at
                 FROM sync_devices WHERE id = $1",
                &[&id.0],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(crate::sync::SyncDevice {
            id: crate::sync::DeviceId(row.get(0)),
            user_id: crate::users::UserId(row.get(1)),
            name: row.get(2),
            device_type: row.get::<_, String>(3).parse().unwrap_or_default(),
            client_version: row.get(4),
            os_info: row.get(5),
            last_sync_at: row.get(6),
            last_seen_at: row.get(7),
            sync_cursor: row.get(8),
            enabled: row.get(9),
            created_at: row.get(10),
            updated_at: row.get(11),
        })
    }
    /// Look up a sync device by the hash of its authentication token.
    ///
    /// Returns `None` when no device matches; used to authenticate incoming
    /// sync requests. An unparseable stored `device_type` falls back to that
    /// enum's `Default`.
    async fn get_device_by_token(
        &self,
        token_hash: &str,
    ) -> Result<Option<crate::sync::SyncDevice>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let row = client
            .query_opt(
                "SELECT id, user_id, name, device_type, client_version, os_info,
                 last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at
                 FROM sync_devices WHERE device_token_hash = $1",
                &[&token_hash],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(row.map(|r| crate::sync::SyncDevice {
            id: crate::sync::DeviceId(r.get(0)),
            user_id: crate::users::UserId(r.get(1)),
            name: r.get(2),
            device_type: r.get::<_, String>(3).parse().unwrap_or_default(),
            client_version: r.get(4),
            os_info: r.get(5),
            last_sync_at: r.get(6),
            last_seen_at: r.get(7),
            sync_cursor: r.get(8),
            enabled: r.get(9),
            created_at: r.get(10),
            updated_at: r.get(11),
        }))
    }
    /// List all sync devices registered by a user, most recently seen first.
    async fn list_user_devices(
        &self,
        user_id: crate::users::UserId,
    ) -> Result<Vec<crate::sync::SyncDevice>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let rows = client
            .query(
                "SELECT id, user_id, name, device_type, client_version, os_info,
                 last_sync_at, last_seen_at, sync_cursor, enabled, created_at, updated_at
                 FROM sync_devices WHERE user_id = $1 ORDER BY last_seen_at DESC",
                &[&user_id.0],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(rows
            .iter()
            .map(|r| crate::sync::SyncDevice {
                id: crate::sync::DeviceId(r.get(0)),
                user_id: crate::users::UserId(r.get(1)),
                name: r.get(2),
                // Unparseable stored type strings fall back to the Default variant.
                device_type: r.get::<_, String>(3).parse().unwrap_or_default(),
                client_version: r.get(4),
                os_info: r.get(5),
                last_sync_at: r.get(6),
                last_seen_at: r.get(7),
                sync_cursor: r.get(8),
                enabled: r.get(9),
                created_at: r.get(10),
                updated_at: r.get(11),
            })
            .collect())
    }
    /// Update a sync device's mutable fields (everything except id, user_id,
    /// token hash, and created_at).
    async fn update_device(&self, device: &crate::sync::SyncDevice) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        client
            .execute(
                "UPDATE sync_devices SET name = $1, device_type = $2, client_version = $3,
                 os_info = $4, last_sync_at = $5, last_seen_at = $6, sync_cursor = $7,
                 enabled = $8, updated_at = $9 WHERE id = $10",
                &[
                    &device.name,
                    &device.device_type.to_string(),
                    &device.client_version,
                    &device.os_info,
                    &device.last_sync_at,
                    &device.last_seen_at,
                    &device.sync_cursor,
                    &device.enabled,
                    &device.updated_at,
                    &device.id.0,
                ],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(())
    }
async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
client
.execute("DELETE FROM sync_devices WHERE id = $1", &[&id.0])
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let now = chrono::Utc::now();
client
.execute(
"UPDATE sync_devices SET last_seen_at = $1, updated_at = $1 WHERE id = $2",
&[&now, &id.0],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
// =========================================================================
// Sync Log
// =========================================================================
async fn record_sync_change(&self, change: &crate::sync::SyncLogEntry) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
// Get and increment sequence
let seq_row = client
.query_one(
"UPDATE sync_sequence SET current_value = current_value + 1 WHERE id = 1 RETURNING current_value",
&[],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
let seq: i64 = seq_row.get(0);
client
.execute(
"INSERT INTO sync_log (id, sequence, change_type, media_id, path, content_hash,
file_size, metadata_json, changed_by_device, timestamp)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
&[
&change.id,
&seq,
&change.change_type.to_string(),
&change.media_id.map(|m| m.0),
&change.path,
&change.content_hash.as_ref().map(|h| h.0.clone()),
&change.file_size.map(|s| s as i64),
&change.metadata_json,
&change.changed_by_device.map(|d| d.0),
&change.timestamp,
],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
    /// Fetch up to `limit` sync-log entries with a sequence strictly greater
    /// than `cursor`, in ascending sequence order.
    ///
    /// Clients page through the log by passing back the highest sequence they
    /// have seen. An unrecognized stored `change_type` string falls back to
    /// `Modified`.
    async fn get_changes_since(
        &self,
        cursor: i64,
        limit: u64,
    ) -> Result<Vec<crate::sync::SyncLogEntry>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let rows = client
            .query(
                "SELECT id, sequence, change_type, media_id, path, content_hash,
                 file_size, metadata_json, changed_by_device, timestamp
                 FROM sync_log WHERE sequence > $1 ORDER BY sequence LIMIT $2",
                &[&cursor, &(limit as i64)],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(rows
            .iter()
            .map(|r| crate::sync::SyncLogEntry {
                id: r.get(0),
                sequence: r.get(1),
                change_type: r
                    .get::<_, String>(2)
                    .parse()
                    .unwrap_or(crate::sync::SyncChangeType::Modified),
                // Nullable columns are wrapped back into their newtype/Option forms.
                media_id: r.get::<_, Option<Uuid>>(3).map(MediaId),
                path: r.get(4),
                content_hash: r.get::<_, Option<String>>(5).map(ContentHash),
                file_size: r.get::<_, Option<i64>>(6).map(|s| s as u64),
                metadata_json: r.get(7),
                changed_by_device: r.get::<_, Option<Uuid>>(8).map(crate::sync::DeviceId),
                timestamp: r.get(9),
            })
            .collect())
    }
async fn get_current_sync_cursor(&self) -> Result<i64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let row = client
.query_one("SELECT current_value FROM sync_sequence WHERE id = 1", &[])
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(row.get(0))
}
async fn cleanup_old_sync_log(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let result = client
.execute("DELETE FROM sync_log WHERE timestamp < $1", &[&before])
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(result)
}
// =========================================================================
// Device Sync State
// =========================================================================
    /// Per-file sync state for one device and path, or `None` if the device
    /// has never synced that path.
    ///
    /// An unrecognized stored `sync_status` string falls back to `Synced`.
    async fn get_device_sync_state(
        &self,
        device_id: crate::sync::DeviceId,
        path: &str,
    ) -> Result<Option<crate::sync::DeviceSyncState>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let row = client
            .query_opt(
                "SELECT device_id, path, local_hash, server_hash, local_mtime, server_mtime,
                 sync_status, last_synced_at, conflict_info_json
                 FROM device_sync_state WHERE device_id = $1 AND path = $2",
                &[&device_id.0, &path],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(row.map(|r| crate::sync::DeviceSyncState {
            device_id: crate::sync::DeviceId(r.get(0)),
            path: r.get(1),
            local_hash: r.get(2),
            server_hash: r.get(3),
            local_mtime: r.get(4),
            server_mtime: r.get(5),
            sync_status: r
                .get::<_, String>(6)
                .parse()
                .unwrap_or(crate::sync::FileSyncStatus::Synced),
            last_synced_at: r.get(7),
            conflict_info_json: r.get(8),
        }))
    }
    /// Insert or fully overwrite the per-file sync state for a (device, path)
    /// pair; `ON CONFLICT` replaces every mutable column with the new values.
    async fn upsert_device_sync_state(&self, state: &crate::sync::DeviceSyncState) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        client
            .execute(
                "INSERT INTO device_sync_state (device_id, path, local_hash, server_hash,
                 local_mtime, server_mtime, sync_status, last_synced_at, conflict_info_json)
                 VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
                 ON CONFLICT(device_id, path) DO UPDATE SET
                     local_hash = EXCLUDED.local_hash,
                     server_hash = EXCLUDED.server_hash,
                     local_mtime = EXCLUDED.local_mtime,
                     server_mtime = EXCLUDED.server_mtime,
                     sync_status = EXCLUDED.sync_status,
                     last_synced_at = EXCLUDED.last_synced_at,
                     conflict_info_json = EXCLUDED.conflict_info_json",
                &[
                    &state.device_id.0,
                    &state.path,
                    &state.local_hash,
                    &state.server_hash,
                    &state.local_mtime,
                    &state.server_mtime,
                    // Status enum is persisted in its string form.
                    &state.sync_status.to_string(),
                    &state.last_synced_at,
                    &state.conflict_info_json,
                ],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(())
    }
    /// All file states for a device that still need work: pending upload,
    /// pending download, or in conflict.
    ///
    /// An unrecognized stored `sync_status` string falls back to `Synced`
    /// (though the WHERE clause only selects the three pending statuses).
    async fn list_pending_sync(
        &self,
        device_id: crate::sync::DeviceId,
    ) -> Result<Vec<crate::sync::DeviceSyncState>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let rows = client
            .query(
                "SELECT device_id, path, local_hash, server_hash, local_mtime, server_mtime,
                 sync_status, last_synced_at, conflict_info_json
                 FROM device_sync_state
                 WHERE device_id = $1 AND sync_status IN ('pending_upload', 'pending_download', 'conflict')",
                &[&device_id.0],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(rows
            .iter()
            .map(|r| crate::sync::DeviceSyncState {
                device_id: crate::sync::DeviceId(r.get(0)),
                path: r.get(1),
                local_hash: r.get(2),
                server_hash: r.get(3),
                local_mtime: r.get(4),
                server_mtime: r.get(5),
                sync_status: r
                    .get::<_, String>(6)
                    .parse()
                    .unwrap_or(crate::sync::FileSyncStatus::Synced),
                last_synced_at: r.get(7),
                conflict_info_json: r.get(8),
            })
            .collect())
    }
// =========================================================================
// Upload Sessions
// =========================================================================
    /// Persist a new chunked-upload session (expected hash/size, chunking
    /// parameters, expiry window).
    async fn create_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        client
            .execute(
                "INSERT INTO upload_sessions (id, device_id, target_path, expected_hash,
                 expected_size, chunk_size, chunk_count, status, created_at, expires_at, last_activity)
                 VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)",
                &[
                    &session.id,
                    &session.device_id.0,
                    &session.target_path,
                    &session.expected_hash.0,
                    // Unsigned sizes/counts are stored as BIGINT (i64).
                    &(session.expected_size as i64),
                    &(session.chunk_size as i64),
                    &(session.chunk_count as i64),
                    &session.status.to_string(),
                    &session.created_at,
                    &session.expires_at,
                    &session.last_activity,
                ],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(())
    }
    /// Load an upload session by id.
    ///
    /// Uses `query_one`, so a missing session surfaces as a database error.
    /// An unrecognized stored `status` string falls back to `Pending`.
    async fn get_upload_session(&self, id: Uuid) -> Result<crate::sync::UploadSession> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let row = client
            .query_one(
                "SELECT id, device_id, target_path, expected_hash, expected_size, chunk_size,
                 chunk_count, status, created_at, expires_at, last_activity
                 FROM upload_sessions WHERE id = $1",
                &[&id],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(crate::sync::UploadSession {
            id: row.get(0),
            device_id: crate::sync::DeviceId(row.get(1)),
            target_path: row.get(2),
            expected_hash: ContentHash(row.get(3)),
            // Stored as BIGINT; converted back to the unsigned model fields.
            expected_size: row.get::<_, i64>(4) as u64,
            chunk_size: row.get::<_, i64>(5) as u64,
            chunk_count: row.get::<_, i64>(6) as u64,
            status: row
                .get::<_, String>(7)
                .parse()
                .unwrap_or(crate::sync::UploadStatus::Pending),
            created_at: row.get(8),
            expires_at: row.get(9),
            last_activity: row.get(10),
        })
    }
async fn update_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
client
.execute(
"UPDATE upload_sessions SET status = $1, last_activity = $2 WHERE id = $3",
&[
&session.status.to_string(),
&session.last_activity,
&session.id,
],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
async fn record_chunk(&self, upload_id: Uuid, chunk: &crate::sync::ChunkInfo) -> Result<()> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
client
.execute(
"INSERT INTO upload_chunks (upload_id, chunk_index, offset, size, hash, received_at)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT(upload_id, chunk_index) DO UPDATE SET
offset = EXCLUDED.offset, size = EXCLUDED.size,
hash = EXCLUDED.hash, received_at = EXCLUDED.received_at",
&[
&upload_id,
&(chunk.chunk_index as i64),
&(chunk.offset as i64),
&(chunk.size as i64),
&chunk.hash,
&chunk.received_at,
],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(())
}
async fn get_upload_chunks(&self, upload_id: Uuid) -> Result<Vec<crate::sync::ChunkInfo>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let rows = client
.query(
"SELECT upload_id, chunk_index, offset, size, hash, received_at
FROM upload_chunks WHERE upload_id = $1 ORDER BY chunk_index",
&[&upload_id],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(rows
.iter()
.map(|r| crate::sync::ChunkInfo {
upload_id: r.get(0),
chunk_index: r.get::<_, i64>(1) as u64,
offset: r.get::<_, i64>(2) as u64,
size: r.get::<_, i64>(3) as u64,
hash: r.get(4),
received_at: r.get(5),
})
.collect())
}
async fn cleanup_expired_uploads(&self) -> Result<u64> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
let now = chrono::Utc::now();
let result = client
.execute("DELETE FROM upload_sessions WHERE expires_at < $1", &[&now])
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
Ok(result)
}
// =========================================================================
// Sync Conflicts
// =========================================================================
    /// Persist a newly detected sync conflict (local vs. server hash/mtime
    /// for one device and path). Resolution fields start out NULL.
    async fn record_conflict(&self, conflict: &crate::sync::SyncConflict) -> Result<()> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        client
            .execute(
                "INSERT INTO sync_conflicts (id, device_id, path, local_hash, local_mtime,
                 server_hash, server_mtime, detected_at)
                 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)",
                &[
                    &conflict.id,
                    &conflict.device_id.0,
                    &conflict.path,
                    &conflict.local_hash,
                    &conflict.local_mtime,
                    &conflict.server_hash,
                    &conflict.server_mtime,
                    &conflict.detected_at,
                ],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(())
    }
    /// All conflicts for a device that have not been resolved yet
    /// (`resolved_at IS NULL`).
    ///
    /// The stored resolution string is decoded back to the enum; unknown
    /// strings map to `None`. The string encoding mirrors `resolve_conflict`.
    async fn get_unresolved_conflicts(
        &self,
        device_id: crate::sync::DeviceId,
    ) -> Result<Vec<crate::sync::SyncConflict>> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
        let rows = client
            .query(
                "SELECT id, device_id, path, local_hash, local_mtime, server_hash, server_mtime,
                 detected_at, resolved_at, resolution
                 FROM sync_conflicts WHERE device_id = $1 AND resolved_at IS NULL",
                &[&device_id.0],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        Ok(rows
            .iter()
            .map(|r| crate::sync::SyncConflict {
                id: r.get(0),
                device_id: crate::sync::DeviceId(r.get(1)),
                path: r.get(2),
                local_hash: r.get(3),
                local_mtime: r.get(4),
                server_hash: r.get(5),
                server_mtime: r.get(6),
                detected_at: r.get(7),
                resolved_at: r.get(8),
                resolution: r
                    .get::<_, Option<String>>(9)
                    .and_then(|s| match s.as_str() {
                        "server_wins" => Some(crate::config::ConflictResolution::ServerWins),
                        "client_wins" => Some(crate::config::ConflictResolution::ClientWins),
                        "keep_both" => Some(crate::config::ConflictResolution::KeepBoth),
                        "manual" => Some(crate::config::ConflictResolution::Manual),
                        _ => None,
                    }),
            })
            .collect())
    }
/// Marks a sync conflict as resolved, recording the chosen resolution
/// strategy and the time of resolution.
async fn resolve_conflict(
    &self,
    id: Uuid,
    resolution: crate::config::ConflictResolution,
) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    // Serialize the resolution variant to its canonical database tag.
    let tag = match resolution {
        crate::config::ConflictResolution::ServerWins => "server_wins",
        crate::config::ConflictResolution::ClientWins => "client_wins",
        crate::config::ConflictResolution::KeepBoth => "keep_both",
        crate::config::ConflictResolution::Manual => "manual",
    };
    let resolved_at = chrono::Utc::now();
    conn.execute(
        "UPDATE sync_conflicts SET resolved_at = $1, resolution = $2 WHERE id = $3",
        &[&resolved_at, &tag, &id],
    )
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(())
}
// =========================================================================
// Shares
// =========================================================================
/// Inserts a new share row, flattening the `ShareRecipient` enum into the
/// (recipient_type, recipient_user_id, public_token, public_password_hash)
/// columns. Returns a clone of the input share.
///
/// NOTE(review): `Group` and `Federated` recipients persist only their type
/// tag — their payload fields are discarded by this INSERT. Confirm that is
/// intended before relying on round-tripping those variants.
async fn create_share(&self, share: &crate::sharing::Share) -> Result<crate::sharing::Share> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    // Flatten the recipient enum into its column representation.
    let (recipient_type, recipient_user_id, public_token, password_hash): (
        &str,
        Option<Uuid>,
        Option<String>,
        Option<String>,
    ) = match &share.recipient {
        crate::sharing::ShareRecipient::PublicLink {
            token,
            password_hash,
        } => (
            "public_link",
            None,
            Some(token.clone()),
            password_hash.clone(),
        ),
        crate::sharing::ShareRecipient::User { user_id } => {
            ("user", Some(user_id.0), None, None)
        }
        crate::sharing::ShareRecipient::Group { .. } => ("group", None, None, None),
        crate::sharing::ShareRecipient::Federated { .. } => ("federated", None, None, None),
    };
    client
        .execute(
            "INSERT INTO shares (id, target_type, target_id, owner_id, recipient_type,
             recipient_user_id, public_token, public_password_hash,
             perm_view, perm_download, perm_edit, perm_delete, perm_reshare, perm_add,
             note, expires_at, access_count, inherit_to_children, parent_share_id,
             created_at, updated_at)
             VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)",
            &[
                &share.id.0,
                &share.target.target_type(),
                &share.target.target_id(),
                &share.owner_id.0,
                &recipient_type,
                &recipient_user_id,
                &public_token,
                &password_hash,
                &share.permissions.can_view,
                &share.permissions.can_download,
                &share.permissions.can_edit,
                &share.permissions.can_delete,
                &share.permissions.can_reshare,
                &share.permissions.can_add,
                &share.note,
                &share.expires_at,
                // access_count is stored as BIGINT.
                &(share.access_count as i64),
                &share.inherit_to_children,
                &share.parent_share_id.map(|s| s.0),
                &share.created_at,
                &share.updated_at,
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(share.clone())
}
/// Loads a single share by id.
///
/// Returns `PinakesError::NotFound` when no row matches, instead of
/// surfacing the driver's "query returned an unexpected number of rows"
/// error as a generic `Database` failure.
async fn get_share(&self, id: crate::sharing::ShareId) -> Result<crate::sharing::Share> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let row = client
        .query_opt(
            "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id,
             public_token, public_password_hash, perm_view, perm_download, perm_edit,
             perm_delete, perm_reshare, perm_add, note, expires_at, access_count,
             last_accessed, inherit_to_children, parent_share_id, created_at, updated_at
             FROM shares WHERE id = $1",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?
        .ok_or_else(|| PinakesError::NotFound(format!("share {} not found", id.0)))?;
    pg_row_to_share(&row)
}
/// Loads a public-link share by its token.
///
/// Returns `PinakesError::NotFound` when no share carries the token, instead
/// of surfacing the driver's row-count error as a generic `Database` failure.
async fn get_share_by_token(&self, token: &str) -> Result<crate::sharing::Share> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let row = client
        .query_opt(
            "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id,
             public_token, public_password_hash, perm_view, perm_download, perm_edit,
             perm_delete, perm_reshare, perm_add, note, expires_at, access_count,
             last_accessed, inherit_to_children, parent_share_id, created_at, updated_at
             FROM shares WHERE public_token = $1",
            &[&token],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?
        // Deliberately does not echo the token back in the error message.
        .ok_or_else(|| PinakesError::NotFound("share token not found".to_string()))?;
    pg_row_to_share(&row)
}
/// Lists shares created by `owner_id`, newest first, with LIMIT/OFFSET
/// pagination.
async fn list_shares_by_owner(
    &self,
    owner_id: crate::users::UserId,
    pagination: &Pagination,
) -> Result<Vec<crate::sharing::Share>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let rows = client
        .query(
            "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id,
             public_token, public_password_hash, perm_view, perm_download, perm_edit,
             perm_delete, perm_reshare, perm_add, note, expires_at, access_count,
             last_accessed, inherit_to_children, parent_share_id, created_at, updated_at
             FROM shares WHERE owner_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3",
            &[
                &owner_id.0,
                &(pagination.limit as i64),
                &(pagination.offset as i64),
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // collect() short-circuits on the first row that fails to decode.
    rows.iter().map(pg_row_to_share).collect()
}
/// Lists shares whose direct recipient is `user_id`, newest first, paginated.
/// Only rows with a matching `recipient_user_id` are returned — public-link
/// shares and shares inherited via collections/tags are not included here.
async fn list_shares_for_user(
    &self,
    user_id: crate::users::UserId,
    pagination: &Pagination,
) -> Result<Vec<crate::sharing::Share>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let rows = client
        .query(
            "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id,
             public_token, public_password_hash, perm_view, perm_download, perm_edit,
             perm_delete, perm_reshare, perm_add, note, expires_at, access_count,
             last_accessed, inherit_to_children, parent_share_id, created_at, updated_at
             FROM shares WHERE recipient_user_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3",
            &[
                &user_id.0,
                &(pagination.limit as i64),
                &(pagination.offset as i64),
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    rows.iter().map(pg_row_to_share).collect()
}
/// Returns every share attached to the given target, regardless of recipient
/// type or expiry state. Callers are responsible for expiry filtering.
async fn list_shares_for_target(
    &self,
    target: &crate::sharing::ShareTarget,
) -> Result<Vec<crate::sharing::Share>> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    // Targets are stored as a (type tag, id) pair.
    let (kind, tid) = (target.target_type(), target.target_id());
    let rows = conn
        .query(
            "SELECT id, target_type, target_id, owner_id, recipient_type, recipient_user_id,
             public_token, public_password_hash, perm_view, perm_download, perm_edit,
             perm_delete, perm_reshare, perm_add, note, expires_at, access_count,
             last_accessed, inherit_to_children, parent_share_id, created_at, updated_at
             FROM shares WHERE target_type = $1 AND target_id = $2",
            &[&kind, &tid],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    rows.iter().map(pg_row_to_share).collect()
}
/// Updates the mutable fields of an existing share: permission flags, note,
/// expiry, inheritance flag, and `updated_at`. Target, owner, and recipient
/// are immutable and deliberately not touched. Returns a clone of the input.
async fn update_share(&self, share: &crate::sharing::Share) -> Result<crate::sharing::Share> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    client
        .execute(
            "UPDATE shares SET
             perm_view = $1, perm_download = $2, perm_edit = $3, perm_delete = $4,
             perm_reshare = $5, perm_add = $6, note = $7, expires_at = $8,
             inherit_to_children = $9, updated_at = $10
             WHERE id = $11",
            &[
                &share.permissions.can_view,
                &share.permissions.can_download,
                &share.permissions.can_edit,
                &share.permissions.can_delete,
                &share.permissions.can_reshare,
                &share.permissions.can_add,
                &share.note,
                &share.expires_at,
                &share.inherit_to_children,
                &share.updated_at,
                &share.id.0,
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(share.clone())
}
/// Permanently removes a share row by id. Deleting a missing id is a no-op.
async fn delete_share(&self, id: crate::sharing::ShareId) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    conn.execute("DELETE FROM shares WHERE id = $1", &[&id.0])
        .await
        .map(|_| ())
        .map_err(|e| PinakesError::Database(e.to_string()))
}
/// Bumps a share's access counter and stamps the last-accessed time.
async fn record_share_access(&self, id: crate::sharing::ShareId) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let accessed_at = chrono::Utc::now();
    // The counter is incremented atomically server-side; only the timestamp
    // is supplied by the application.
    conn.execute(
        "UPDATE shares SET access_count = access_count + 1, last_accessed = $1 WHERE id = $2",
        &[&accessed_at, &id.0],
    )
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(())
}
/// Determines whether a (possibly anonymous) user has access to `target`
/// through any non-expired share, returning that share's permissions.
///
/// Shares are checked in the order returned by `list_shares_for_target`; the
/// first grant wins. Public links grant to anyone; user shares grant only to
/// the matching user. Group and federated recipients never grant here.
async fn check_share_access(
    &self,
    user_id: Option<crate::users::UserId>,
    target: &crate::sharing::ShareTarget,
) -> Result<Option<crate::sharing::SharePermissions>> {
    let now = chrono::Utc::now();
    for share in self.list_shares_for_target(target).await? {
        // Expired shares grant nothing.
        if share.expires_at.map_or(false, |exp| exp < now) {
            continue;
        }
        let grants = match (&share.recipient, user_id) {
            // Public links are accessible to anyone.
            (crate::sharing::ShareRecipient::PublicLink { .. }, _) => true,
            // User shares require the requester to match the recipient.
            (
                crate::sharing::ShareRecipient::User {
                    user_id: recipient,
                },
                Some(requester),
            ) => *recipient == requester,
            _ => false,
        };
        if grants {
            return Ok(Some(share.permissions));
        }
    }
    Ok(None)
}
/// Computes the share permissions a (possibly anonymous) user has on a media
/// item, checking in precedence order:
///   1. shares on the media item itself,
///   2. shares on any collection containing the item,
///   3. shares on any tag attached to the item.
/// The first matching share wins; permissions from multiple shares are not
/// merged.
async fn get_effective_share_permissions(
    &self,
    user_id: Option<crate::users::UserId>,
    media_id: MediaId,
) -> Result<Option<crate::sharing::SharePermissions>> {
    // Check direct media shares
    let target = crate::sharing::ShareTarget::Media { media_id };
    if let Some(perms) = self.check_share_access(user_id, &target).await? {
        return Ok(Some(perms));
    }
    // Check collection shares (inheritance)
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    // Find collections containing this media
    let collection_rows = client
        .query(
            "SELECT collection_id FROM collection_items WHERE media_id = $1",
            &[&media_id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    for row in collection_rows {
        let collection_id: Uuid = row.get(0);
        let target = crate::sharing::ShareTarget::Collection { collection_id };
        if let Some(perms) = self.check_share_access(user_id, &target).await? {
            return Ok(Some(perms));
        }
    }
    // Check tag shares (inheritance)
    let tag_rows = client
        .query(
            "SELECT tag_id FROM media_tags WHERE media_id = $1",
            &[&media_id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    for row in tag_rows {
        let tag_id: Uuid = row.get(0);
        let target = crate::sharing::ShareTarget::Tag { tag_id };
        if let Some(perms) = self.check_share_access(user_id, &target).await? {
            return Ok(Some(perms));
        }
    }
    Ok(None)
}
/// Deletes many shares in a single set-based statement, returning the number
/// of rows removed. An empty id slice short-circuits without touching the DB.
async fn batch_delete_shares(&self, ids: &[crate::sharing::ShareId]) -> Result<u64> {
    if ids.is_empty() {
        return Ok(0);
    }
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    // Unwrap the newtype so the slice binds as a uuid[] parameter.
    let raw_ids: Vec<Uuid> = ids.iter().map(|share_id| share_id.0).collect();
    conn.execute("DELETE FROM shares WHERE id = ANY($1)", &[&raw_ids])
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))
}
/// Removes all shares whose expiry has passed, returning the count deleted.
/// Shares without an expiry (`expires_at IS NULL`) are never touched.
async fn cleanup_expired_shares(&self) -> Result<u64> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let cutoff = chrono::Utc::now();
    conn.execute(
        "DELETE FROM shares WHERE expires_at IS NOT NULL AND expires_at < $1",
        &[&cutoff],
    )
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))
}
// =========================================================================
// Share Activity
// =========================================================================
/// Appends one row to the share activity audit log. The action enum is
/// stored via its `Display` string representation.
async fn record_share_activity(&self, activity: &crate::sharing::ShareActivity) -> Result<()> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    client
        .execute(
            "INSERT INTO share_activity (id, share_id, actor_id, actor_ip, action, details, timestamp)
             VALUES ($1, $2, $3, $4, $5, $6, $7)",
            &[
                &activity.id,
                &activity.share_id.0,
                // Anonymous access (public links) has no actor id.
                &activity.actor_id.map(|u| u.0),
                &activity.actor_ip,
                &activity.action.to_string(),
                &activity.details,
                &activity.timestamp,
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(())
}
/// Returns the activity log for a share, newest first, with LIMIT/OFFSET
/// pagination.
async fn get_share_activity(
    &self,
    share_id: crate::sharing::ShareId,
    pagination: &Pagination,
) -> Result<Vec<crate::sharing::ShareActivity>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let rows = client
        .query(
            "SELECT id, share_id, actor_id, actor_ip, action, details, timestamp
             FROM share_activity WHERE share_id = $1 ORDER BY timestamp DESC LIMIT $2 OFFSET $3",
            &[
                &share_id.0,
                &(pagination.limit as i64),
                &(pagination.offset as i64),
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(rows
        .iter()
        .map(|r| crate::sharing::ShareActivity {
            id: r.get(0),
            share_id: crate::sharing::ShareId(r.get(1)),
            actor_id: r.get::<_, Option<Uuid>>(2).map(crate::users::UserId),
            actor_ip: r.get(3),
            // Unknown/legacy action strings decode to the Accessed fallback
            // rather than failing the whole listing.
            action: r
                .get::<_, String>(4)
                .parse()
                .unwrap_or(crate::sharing::ShareActivityAction::Accessed),
            details: r.get(5),
            timestamp: r.get(6),
        })
        .collect())
}
// =========================================================================
// Share Notifications
// =========================================================================
/// Stores a notification informing a user about share activity. The
/// notification type enum is persisted via its `Display` string.
async fn create_share_notification(
    &self,
    notification: &crate::sharing::ShareNotification,
) -> Result<()> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    client
        .execute(
            "INSERT INTO share_notifications (id, user_id, share_id, notification_type, is_read, created_at)
             VALUES ($1, $2, $3, $4, $5, $6)",
            &[
                &notification.id,
                &notification.user_id.0,
                &notification.share_id.0,
                &notification.notification_type.to_string(),
                &notification.is_read,
                &notification.created_at,
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(())
}
/// Returns a user's unread share notifications, newest first (unpaginated).
async fn get_unread_notifications(
    &self,
    user_id: crate::users::UserId,
) -> Result<Vec<crate::sharing::ShareNotification>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    let rows = client
        .query(
            "SELECT id, user_id, share_id, notification_type, is_read, created_at
             FROM share_notifications WHERE user_id = $1 AND is_read = false ORDER BY created_at DESC",
            &[&user_id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(rows
        .iter()
        .map(|r| crate::sharing::ShareNotification {
            id: r.get(0),
            user_id: crate::users::UserId(r.get(1)),
            share_id: crate::sharing::ShareId(r.get(2)),
            // Unknown/legacy type strings decode to the NewShare fallback
            // rather than failing the whole listing.
            notification_type: r
                .get::<_, String>(3)
                .parse()
                .unwrap_or(crate::sharing::ShareNotificationType::NewShare),
            is_read: r.get(4),
            created_at: r.get(5),
        })
        .collect())
}
/// Flags a single notification as read. Unknown ids are a no-op.
async fn mark_notification_read(&self, id: Uuid) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    conn.execute(
        "UPDATE share_notifications SET is_read = true WHERE id = $1",
        &[&id],
    )
    .await
    .map(|_| ())
    .map_err(|e| PinakesError::Database(e.to_string()))
}
/// Flags every notification belonging to `user_id` as read.
async fn mark_all_notifications_read(&self, user_id: crate::users::UserId) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("failed to get connection: {e}")))?;
    conn.execute(
        "UPDATE share_notifications SET is_read = true WHERE user_id = $1",
        &[&user_id.0],
    )
    .await
    .map(|_| ())
    .map_err(|e| PinakesError::Database(e.to_string()))
}
// ===== File Management =====
/// Renames a media item's file within its current directory, returning the
/// previous path.
///
/// For externally stored items the file is renamed on disk first; the DB row
/// is then updated. In addition to rejecting path separators, the validation
/// now rejects `.` and `..`, which would otherwise let `parent.join(new_name)`
/// escape or collapse the containing directory.
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
    // Validate the new name: no empty names, separators, or traversal components.
    if new_name.is_empty()
        || new_name.contains('/')
        || new_name.contains('\\')
        || new_name == "."
        || new_name == ".."
    {
        return Err(PinakesError::InvalidOperation(
            "Invalid file name: must not be empty or contain path separators".into(),
        ));
    }
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Get the current path and storage mode; trashed items cannot be renamed.
    let row = client
        .query_one(
            "SELECT path, storage_mode FROM media_items WHERE id = $1 AND deleted_at IS NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    let old_path: String = row.get(0);
    let storage_mode: String = row.get(1);
    let old_path_buf = std::path::PathBuf::from(&old_path);
    let parent = old_path_buf.parent().unwrap_or(std::path::Path::new(""));
    let new_path = parent.join(new_name);
    let new_path_str = new_path.to_string_lossy().to_string();
    // For external storage, actually rename the file on disk.
    if storage_mode == "external" && old_path_buf.exists() {
        tokio::fs::rename(&old_path_buf, &new_path)
            .await
            .map_err(|e| {
                PinakesError::Io(std::io::Error::new(
                    e.kind(),
                    format!("Failed to rename file: {}", e),
                ))
            })?;
    }
    // Update the database. Note: if this fails after a successful disk
    // rename, disk and DB diverge until the next rescan.
    client
        .execute(
            "UPDATE media_items SET file_name = $1, path = $2, updated_at = NOW() WHERE id = $3",
            &[&new_name, &new_path_str, &id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(old_path)
}
/// Moves a media item into `new_directory` (keeping its file name),
/// returning the previous path.
///
/// For externally stored items the file is moved on disk; the DB row is then
/// updated. The target directory is created unconditionally with
/// `create_dir_all` (a no-op when it already exists), which removes the
/// exists()/create TOCTOU race of checking first.
async fn move_media(&self, id: MediaId, new_directory: &std::path::Path) -> Result<String> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Get the current path, file_name, and storage mode; trashed items
    // cannot be moved.
    let row = client
        .query_one(
            "SELECT path, file_name, storage_mode FROM media_items WHERE id = $1 AND deleted_at IS NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    let old_path: String = row.get(0);
    let file_name: String = row.get(1);
    let storage_mode: String = row.get(2);
    let old_path_buf = std::path::PathBuf::from(&old_path);
    let new_path = new_directory.join(&file_name);
    let new_path_str = new_path.to_string_lossy().to_string();
    // Ensure the target directory exists (idempotent; avoids a TOCTOU race).
    tokio::fs::create_dir_all(new_directory).await?;
    // For external storage, actually move the file on disk.
    if storage_mode == "external" && old_path_buf.exists() {
        tokio::fs::rename(&old_path_buf, &new_path)
            .await
            .map_err(|e| {
                PinakesError::Io(std::io::Error::new(
                    e.kind(),
                    format!("Failed to move file: {}", e),
                ))
            })?;
    }
    // Update the database. Note: if this fails after a successful disk move,
    // disk and DB diverge until the next rescan.
    client
        .execute(
            "UPDATE media_items SET path = $1, updated_at = NOW() WHERE id = $2",
            &[&new_path_str, &id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(old_path)
}
// ===== Trash / Soft Delete =====
/// Moves a media item to the trash by stamping `deleted_at`.
///
/// Fails with `NotFound` when the item does not exist or is already trashed
/// (the guard `deleted_at IS NULL` makes trashing idempotent-safe).
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let updated = conn
        .execute(
            "UPDATE media_items SET deleted_at = NOW(), updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    match updated {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found or already deleted",
            id
        ))),
        _ => Ok(()),
    }
}
/// Restores a trashed media item by clearing `deleted_at`.
///
/// Fails with `NotFound` when the item does not exist or is not in the trash
/// (the guard `deleted_at IS NOT NULL` only matches trashed rows).
async fn restore_media(&self, id: MediaId) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let updated = conn
        .execute(
            "UPDATE media_items SET deleted_at = NULL, updated_at = NOW() WHERE id = $1 AND deleted_at IS NOT NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    match updated {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found in trash",
            id
        ))),
        _ => Ok(()),
    }
}
/// Lists soft-deleted (trashed) media items, most recently deleted first,
/// with LIMIT/OFFSET pagination.
async fn list_trash(&self, pagination: &Pagination) -> Result<Vec<MediaItem>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let rows = client
        .query(
            "SELECT id, path, file_name, media_type, content_hash, file_size,
             title, artist, album, genre, year, duration_secs, description,
             thumbnail_path, created_at, updated_at, file_mtime,
             date_taken, latitude, longitude, camera_make, camera_model, rating,
             storage_mode, original_filename, uploaded_at, storage_key,
             perceptual_hash, deleted_at
             FROM media_items
             WHERE deleted_at IS NOT NULL
             ORDER BY deleted_at DESC
             LIMIT $1 OFFSET $2",
            &[&(pagination.limit as i64), &(pagination.offset as i64)],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // Decode each row via the shared helper; the first decode failure aborts
    // the whole listing.
    let mut items = Vec::new();
    for row in rows {
        items.push(row_to_media_item(&row)?);
    }
    Ok(items)
}
/// Permanently deletes all trashed media items and their dependent rows
/// (tags, collection memberships, custom fields), returning the number of
/// media items removed.
///
/// Dependent rows are cleaned up with one set-based `ANY($1)` statement per
/// table instead of three round trips per item (N+1). Cleanup remains
/// best-effort, matching the previous behavior of ignoring failures.
async fn empty_trash(&self) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Collect ids of trashed items so dependent rows can be removed first.
    let id_rows = client
        .query(
            "SELECT id FROM media_items WHERE deleted_at IS NOT NULL",
            &[],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    let ids: Vec<uuid::Uuid> = id_rows.iter().map(|row| row.get(0)).collect();
    if !ids.is_empty() {
        // Best-effort cleanup of dependent tables (errors deliberately ignored).
        for table in ["media_tags", "collection_items", "custom_fields"] {
            let sql = format!("DELETE FROM {table} WHERE media_id = ANY($1)");
            client.execute(sql.as_str(), &[&ids]).await.ok();
        }
    }
    // Delete the media items themselves.
    let count = client
        .execute("DELETE FROM media_items WHERE deleted_at IS NOT NULL", &[])
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(count)
}
/// Permanently deletes trashed media items whose `deleted_at` is older than
/// `before`, along with their dependent rows, returning the number of media
/// items removed.
///
/// Dependent rows are cleaned up with one set-based `ANY($1)` statement per
/// table instead of three round trips per item (N+1). Cleanup remains
/// best-effort, matching the previous behavior of ignoring failures.
async fn purge_old_trash(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Collect ids of expired trash so dependent rows can be removed first.
    let id_rows = client
        .query(
            "SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1",
            &[&before],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    let ids: Vec<uuid::Uuid> = id_rows.iter().map(|row| row.get(0)).collect();
    if !ids.is_empty() {
        // Best-effort cleanup of dependent tables (errors deliberately ignored).
        for table in ["media_tags", "collection_items", "custom_fields"] {
            let sql = format!("DELETE FROM {table} WHERE media_id = ANY($1)");
            client.execute(sql.as_str(), &[&ids]).await.ok();
        }
    }
    // Delete the media items themselves.
    let count = client
        .execute(
            "DELETE FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1",
            &[&before],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(count)
}
/// Counts the media items currently in the trash.
async fn count_trash(&self) -> Result<u64> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let row = conn
        .query_one(
            "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL",
            &[],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // COUNT(*) comes back as BIGINT and can never be negative.
    Ok(row.get::<_, i64>(0) as u64)
}
// ===== Markdown Links (Obsidian-style) =====
/// Replaces the stored outgoing links of a markdown document with `links`.
///
/// The delete-then-insert sequence runs inside a single transaction so that a
/// failed insert cannot leave the document's link set half-written; the
/// previous implementation issued each statement on autocommit.
async fn save_markdown_links(
    &self,
    media_id: MediaId,
    links: &[crate::model::MarkdownLink],
) -> Result<()> {
    let mut client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let tx = client
        .transaction()
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // Ids are stored as text in markdown_links.
    let media_id_str = media_id.0.to_string();
    // Delete existing links for this source.
    tx.execute(
        "DELETE FROM markdown_links WHERE source_media_id = $1",
        &[&media_id_str],
    )
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?;
    // Insert the new link set.
    for link in links {
        let link_id = link.id.to_string();
        let target_media_id = link.target_media_id.map(|id| id.0.to_string());
        let link_type = link.link_type.to_string();
        tx.execute(
            "INSERT INTO markdown_links (
                id, source_media_id, target_path, target_media_id,
                link_type, link_text, line_number, context, created_at
            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
            &[
                &link_id,
                &media_id_str,
                &link.target_path,
                &target_media_id,
                &link_type,
                &link.link_text,
                &link.line_number,
                &link.context,
                &link.created_at,
            ],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    }
    // Commit atomically; dropping `tx` on an earlier error rolls back.
    tx.commit()
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(())
}
async fn get_outgoing_links(
&self,
media_id: MediaId,
) -> Result<Vec<crate::model::MarkdownLink>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let media_id_str = media_id.0.to_string();
let rows = client
.query(
"SELECT id, source_media_id, target_path, target_media_id,
link_type, link_text, line_number, context, created_at
FROM markdown_links
WHERE source_media_id = $1
ORDER BY line_number",
&[&media_id_str],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
let mut links = Vec::new();
for row in rows {
links.push(row_to_markdown_link(&row)?);
}
Ok(links)
}
/// Lists all links pointing *to* the given media item (its backlinks),
/// joined with the source document's title and path for display.
///
/// `markdown_links` stores media ids as text (see `save_markdown_links`),
/// while `media_items.id` is bound as a UUID by every other query in this
/// backend; PostgreSQL has no `text = uuid` operator, so the join casts
/// `m.id` to text. The cast is a no-op if the column is already textual.
async fn get_backlinks(&self, media_id: MediaId) -> Result<Vec<crate::model::BacklinkInfo>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let media_id_str = media_id.0.to_string();
    let rows = client
        .query(
            "SELECT l.id, l.source_media_id, m.title, m.path,
             l.link_text, l.line_number, l.context, l.link_type
             FROM markdown_links l
             JOIN media_items m ON l.source_media_id = m.id::text
             WHERE l.target_media_id = $1
             ORDER BY m.title, l.line_number",
            &[&media_id_str],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    let mut backlinks = Vec::new();
    for row in rows {
        let link_id_str: String = row.get(0);
        let source_id_str: String = row.get(1);
        let source_title: Option<String> = row.get(2);
        let source_path: String = row.get(3);
        let link_text: Option<String> = row.get(4);
        let line_number: Option<i32> = row.get(5);
        let context: Option<String> = row.get(6);
        let link_type_str: String = row.get(7);
        backlinks.push(crate::model::BacklinkInfo {
            // Stored ids are text; parse back into UUIDs for the model.
            link_id: Uuid::parse_str(&link_id_str)
                .map_err(|e| PinakesError::Database(e.to_string()))?,
            source_id: MediaId(
                Uuid::parse_str(&source_id_str)
                    .map_err(|e| PinakesError::Database(e.to_string()))?,
            ),
            source_title,
            source_path,
            link_text,
            line_number,
            context,
            // Unknown link-type strings fall back to Wikilink.
            link_type: link_type_str
                .parse()
                .unwrap_or(crate::model::LinkType::Wikilink),
        });
    }
    Ok(backlinks)
}
/// Deletes every outgoing link recorded for the given source document.
async fn clear_links_for_media(&self, media_id: MediaId) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Ids are stored as text in markdown_links.
    let source_id = media_id.0.to_string();
    conn.execute(
        "DELETE FROM markdown_links WHERE source_media_id = $1",
        &[&source_id],
    )
    .await
    .map(|_| ())
    .map_err(|e| PinakesError::Database(e.to_string()))
}
/// Builds a node/edge graph of the markdown link structure.
///
/// With `center_id`, performs a BFS over both outgoing and incoming links up
/// to `depth` hops (capped at 5) from the center. Without a center, seeds the
/// graph with up to 500 markdown items. Each node carries its outgoing and
/// incoming link counts; edges are emitted only between nodes inside the
/// selected set.
///
/// NOTE(review): ids are handled as `String` here and bound directly against
/// `media_items.id`, which every other method in this backend binds as
/// `Uuid`. If that column is UUID-typed, these binds/decodes may fail at
/// runtime — confirm against the schema (the markdown_links columns are
/// text, per `save_markdown_links`). Also, the per-node count queries are
/// N+1; acceptable for <= 500 nodes but worth batching later.
async fn get_graph_data(
    &self,
    center_id: Option<MediaId>,
    depth: u32,
) -> Result<crate::model::GraphData> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let depth = depth.min(5); // Limit depth
    let mut nodes = Vec::new();
    let mut edges = Vec::new();
    let mut node_ids: std::collections::HashSet<String> = std::collections::HashSet::new();
    if let Some(center) = center_id {
        // BFS to find connected nodes within depth
        let mut frontier = vec![center.0.to_string()];
        let mut visited = std::collections::HashSet::new();
        visited.insert(center.0.to_string());
        for _ in 0..depth {
            if frontier.is_empty() {
                break;
            }
            let mut next_frontier = Vec::new();
            for node_id in &frontier {
                // Get outgoing links
                let rows = client
                    .query(
                        "SELECT target_media_id FROM markdown_links
                         WHERE source_media_id = $1 AND target_media_id IS NOT NULL",
                        &[node_id],
                    )
                    .await
                    .map_err(|e| PinakesError::Database(e.to_string()))?;
                for row in rows {
                    let id: String = row.get(0);
                    if !visited.contains(&id) {
                        visited.insert(id.clone());
                        next_frontier.push(id);
                    }
                }
                // Get incoming links
                let rows = client
                    .query(
                        "SELECT source_media_id FROM markdown_links
                         WHERE target_media_id = $1",
                        &[node_id],
                    )
                    .await
                    .map_err(|e| PinakesError::Database(e.to_string()))?;
                for row in rows {
                    let id: String = row.get(0);
                    if !visited.contains(&id) {
                        visited.insert(id.clone());
                        next_frontier.push(id);
                    }
                }
            }
            frontier = next_frontier;
        }
        node_ids = visited;
    } else {
        // Get all markdown files with links (limit to 500)
        let rows = client
            .query(
                "SELECT DISTINCT id FROM media_items
                 WHERE media_type = 'markdown' AND deleted_at IS NULL
                 LIMIT 500",
                &[],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        for row in rows {
            let id: String = row.get(0);
            node_ids.insert(id);
        }
    }
    // Build nodes with metadata
    for node_id in &node_ids {
        // Nodes that no longer exist in media_items are silently skipped.
        let row = client
            .query_opt(
                "SELECT id, COALESCE(title, file_name) as label, title, media_type
                 FROM media_items WHERE id = $1",
                &[node_id],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        if let Some(row) = row {
            let id: String = row.get(0);
            let label: String = row.get(1);
            let title: Option<String> = row.get(2);
            let media_type: String = row.get(3);
            // Count outgoing links
            let link_count_row = client
                .query_one(
                    "SELECT COUNT(*) FROM markdown_links WHERE source_media_id = $1",
                    &[&id],
                )
                .await
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let link_count: i64 = link_count_row.get(0);
            // Count incoming links
            let backlink_count_row = client
                .query_one(
                    "SELECT COUNT(*) FROM markdown_links WHERE target_media_id = $1",
                    &[&id],
                )
                .await
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let backlink_count: i64 = backlink_count_row.get(0);
            nodes.push(crate::model::GraphNode {
                id: id.clone(),
                label,
                title,
                media_type,
                link_count: link_count as u32,
                backlink_count: backlink_count as u32,
            });
        }
    }
    // Build edges
    for node_id in &node_ids {
        let rows = client
            .query(
                "SELECT source_media_id, target_media_id, link_type
                 FROM markdown_links
                 WHERE source_media_id = $1 AND target_media_id IS NOT NULL",
                &[node_id],
            )
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        for row in rows {
            let source: String = row.get(0);
            let target: String = row.get(1);
            let link_type_str: String = row.get(2);
            // Only keep edges whose endpoints are both in the node set.
            if node_ids.contains(&target) {
                edges.push(crate::model::GraphEdge {
                    source,
                    target,
                    // Unknown link-type strings fall back to Wikilink.
                    link_type: link_type_str
                        .parse()
                        .unwrap_or(crate::model::LinkType::Wikilink),
                });
            }
        }
    }
    Ok(crate::model::GraphData { nodes, edges })
}
/// Attempts to resolve dangling markdown links (rows with a NULL
/// `target_media_id`) to concrete media items, returning the total number of
/// rows updated across both passes.
///
/// Pass 1 matches on the exact stored path; pass 2 falls back to filename
/// matching (with/without a `.md` extension) for rows still unresolved.
/// `media_items.id` is cast to text because `markdown_links.target_media_id`
/// is stored as text (see `save_markdown_links`); without the cast,
/// PostgreSQL rejects assigning a uuid value to a text column. The cast is a
/// no-op if the id column is already textual.
async fn resolve_links(&self) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Strategy 1: Exact path match
    let result1 = client
        .execute(
            "UPDATE markdown_links
             SET target_media_id = (
                 SELECT id::text FROM media_items
                 WHERE path = markdown_links.target_path
                 AND deleted_at IS NULL
                 LIMIT 1
             )
             WHERE target_media_id IS NULL
             AND EXISTS (
                 SELECT 1 FROM media_items
                 WHERE path = markdown_links.target_path
                 AND deleted_at IS NULL
             )",
            &[],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // Strategy 2: Filename match (only rows still unresolved after pass 1)
    let result2 = client
        .execute(
            "UPDATE markdown_links
             SET target_media_id = (
                 SELECT id::text FROM media_items
                 WHERE (file_name = markdown_links.target_path
                 OR file_name = markdown_links.target_path || '.md'
                 OR REPLACE(file_name, '.md', '') = markdown_links.target_path)
                 AND deleted_at IS NULL
                 LIMIT 1
             )
             WHERE target_media_id IS NULL
             AND EXISTS (
                 SELECT 1 FROM media_items
                 WHERE (file_name = markdown_links.target_path
                 OR file_name = markdown_links.target_path || '.md'
                 OR REPLACE(file_name, '.md', '') = markdown_links.target_path)
                 AND deleted_at IS NULL
             )",
            &[],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(result1 + result2)
}
/// Stamps `links_extracted_at` on a media item after link extraction.
///
/// Binds the id as a `Uuid` directly: `media_items.id` is bound as a UUID by
/// every other query in this backend, and tokio-postgres rejects a Rust
/// `String` parameter against a uuid-typed column.
async fn mark_links_extracted(&self, media_id: MediaId) -> Result<()> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let now = chrono::Utc::now();
    client
        .execute(
            "UPDATE media_items SET links_extracted_at = $1 WHERE id = $2",
            &[&now, &media_id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(())
}
/// Counts markdown links that have not yet been resolved to a media item
/// (i.e. rows with a NULL `target_media_id`).
async fn count_unresolved_links(&self) -> Result<u64> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let row = conn
        .query_one(
            "SELECT COUNT(*) FROM markdown_links WHERE target_media_id IS NULL",
            &[],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // COUNT(*) comes back as BIGINT and can never be negative.
    Ok(row.get::<_, i64>(0) as u64)
}
}
impl PostgresBackend {
    /// Load the stored profile row for `user_id`, falling back to an
    /// empty profile (no avatar, no bio, default preferences) when the
    /// user has no `user_profiles` row yet.
    async fn load_user_profile(&self, user_id: uuid::Uuid) -> Result<crate::users::UserProfile> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let row = client
            .query_opt(
                "SELECT avatar_path, bio, preferences_json FROM user_profiles WHERE user_id = $1",
                &[&user_id],
            )
            .await?;
        match row {
            Some(row) => {
                let prefs_json: serde_json::Value = row.get::<_, serde_json::Value>(2);
                // Corrupt or outdated preference JSON degrades to the
                // defaults instead of failing the whole profile load.
                let preferences: crate::users::UserPreferences =
                    serde_json::from_value(prefs_json).unwrap_or_default();
                Ok(crate::users::UserProfile {
                    avatar_path: row.get(0),
                    bio: row.get(1),
                    preferences,
                })
            }
            // No profile row yet: return an all-default profile.
            None => Ok(crate::users::UserProfile {
                avatar_path: None,
                bio: None,
                preferences: Default::default(),
            }),
        }
    }
    /// Aggregate library-wide statistics: item/byte totals, per-type
    /// breakdowns, newest/oldest creation timestamps, top-10 tags and
    /// collections by member count, and the number of content hashes
    /// shared by more than one item (duplicates).
    ///
    /// NOTE(review): unlike the link-resolution queries, none of these
    /// aggregates filter on `deleted_at IS NULL`, so soft-deleted items
    /// appear in every figure — confirm that is intentional.
    async fn library_statistics_inner(&self) -> Result<super::LibraryStatistics> {
        let client = self
            .pool
            .get()
            .await
            .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
        let row = client
            .query_one(
                "SELECT COUNT(*), COALESCE(SUM(file_size), 0) FROM media_items",
                &[],
            )
            .await?;
        let total_media: i64 = row.get(0);
        let total_size: i64 = row.get(1);
        // Integer division: the average is truncated, and guarded
        // against an empty library.
        let avg_size = if total_media > 0 {
            total_size / total_media
        } else {
            0
        };
        // Item counts per media type, most common first.
        let rows = client.query("SELECT media_type, COUNT(*) FROM media_items GROUP BY media_type ORDER BY COUNT(*) DESC", &[]).await?;
        let media_by_type: Vec<(String, u64)> = rows
            .iter()
            .map(|r| {
                let mt: String = r.get(0);
                let cnt: i64 = r.get(1);
                (mt, cnt as u64)
            })
            .collect();
        // Bytes consumed per media type, largest first.
        let rows = client.query("SELECT media_type, COALESCE(SUM(file_size), 0) FROM media_items GROUP BY media_type ORDER BY SUM(file_size) DESC", &[]).await?;
        let storage_by_type: Vec<(String, u64)> = rows
            .iter()
            .map(|r| {
                let mt: String = r.get(0);
                let sz: i64 = r.get(1);
                (mt, sz as u64)
            })
            .collect();
        // Timestamps are cast to text server-side (`::text`) and passed
        // through verbatim.
        let newest: Option<String> = client
            .query_opt(
                "SELECT created_at::text FROM media_items ORDER BY created_at DESC LIMIT 1",
                &[],
            )
            .await?
            .map(|r| r.get(0));
        let oldest: Option<String> = client
            .query_opt(
                "SELECT created_at::text FROM media_items ORDER BY created_at ASC LIMIT 1",
                &[],
            )
            .await?
            .map(|r| r.get(0));
        // Top 10 tags by number of tagged media items.
        let rows = client.query(
            "SELECT t.name, COUNT(*) as cnt FROM media_tags mt JOIN tags t ON mt.tag_id = t.id GROUP BY t.id, t.name ORDER BY cnt DESC LIMIT 10",
            &[],
        ).await?;
        let top_tags: Vec<(String, u64)> = rows
            .iter()
            .map(|r| {
                let name: String = r.get(0);
                let cnt: i64 = r.get(1);
                (name, cnt as u64)
            })
            .collect();
        // Top 10 collections by member count.
        let rows = client.query(
            "SELECT c.name, COUNT(*) as cnt FROM collection_members cm JOIN collections c ON cm.collection_id = c.id GROUP BY c.id, c.name ORDER BY cnt DESC LIMIT 10",
            &[],
        ).await?;
        let top_collections: Vec<(String, u64)> = rows
            .iter()
            .map(|r| {
                let name: String = r.get(0);
                let cnt: i64 = r.get(1);
                (name, cnt as u64)
            })
            .collect();
        let total_tags: i64 = client
            .query_one("SELECT COUNT(*) FROM tags", &[])
            .await?
            .get(0);
        let total_collections: i64 = client
            .query_one("SELECT COUNT(*) FROM collections", &[])
            .await?
            .get(0);
        // A "duplicate" is a content hash shared by 2+ items; this
        // counts distinct duplicated hashes, not extra copies.
        let total_duplicates: i64 = client.query_one(
            "SELECT COUNT(*) FROM (SELECT content_hash FROM media_items GROUP BY content_hash HAVING COUNT(*) > 1) sub",
            &[],
        ).await?.get(0);
        // All counts/sizes are non-negative BIGINTs, so the u64 casts
        // are lossless.
        Ok(super::LibraryStatistics {
            total_media: total_media as u64,
            total_size_bytes: total_size as u64,
            avg_file_size_bytes: avg_size as u64,
            media_by_type,
            storage_by_type,
            newest_item: newest,
            oldest_item: oldest,
            top_tags,
            top_collections,
            total_tags: total_tags as u64,
            total_collections: total_collections as u64,
            total_duplicates: total_duplicates as u64,
        })
    }
}
/// Helper function to parse a share row from PostgreSQL
fn pg_row_to_share(row: &Row) -> Result<crate::sharing::Share> {
let id: Uuid = row.get(0);
let target_type: String = row.get(1);
let target_id: Uuid = row.get(2);
let owner_id: Uuid = row.get(3);
let recipient_type: String = row.get(4);
let recipient_user_id: Option<Uuid> = row.get(5);
let public_token: Option<String> = row.get(6);
let password_hash: Option<String> = row.get(7);
let target = match target_type.as_str() {
"media" => crate::sharing::ShareTarget::Media {
media_id: MediaId(target_id),
},
"collection" => crate::sharing::ShareTarget::Collection {
collection_id: target_id,
},
"tag" => crate::sharing::ShareTarget::Tag { tag_id: target_id },
"saved_search" => crate::sharing::ShareTarget::SavedSearch {
search_id: target_id,
},
_ => crate::sharing::ShareTarget::Media {
media_id: MediaId(target_id),
},
};
let recipient = match recipient_type.as_str() {
"public_link" => crate::sharing::ShareRecipient::PublicLink {
token: public_token.unwrap_or_default(),
password_hash,
},
"user" => crate::sharing::ShareRecipient::User {
user_id: crate::users::UserId(recipient_user_id.unwrap_or(Uuid::nil())),
},
"group" => crate::sharing::ShareRecipient::Group {
group_id: Uuid::nil(),
},
_ => crate::sharing::ShareRecipient::PublicLink {
token: public_token.unwrap_or_default(),
password_hash,
},
};
Ok(crate::sharing::Share {
id: crate::sharing::ShareId(id),
target,
owner_id: crate::users::UserId(owner_id),
recipient,
permissions: crate::sharing::SharePermissions {
can_view: row.get(8),
can_download: row.get(9),
can_edit: row.get(10),
can_delete: row.get(11),
can_reshare: row.get(12),
can_add: row.get(13),
},
note: row.get(14),
expires_at: row.get(15),
access_count: row.get::<_, i64>(16) as u64,
last_accessed: row.get(17),
inherit_to_children: row.get(18),
parent_share_id: row.get::<_, Option<Uuid>>(19).map(crate::sharing::ShareId),
created_at: row.get(20),
updated_at: row.get(21),
})
}
/// Whether a [`SearchQuery`] tree contains at least one node that hits the
/// full-text-search index: a non-empty `FullText` term or any `Prefix`.
fn query_has_fts(query: &SearchQuery) -> bool {
    match query {
        SearchQuery::FullText(text) => !text.is_empty(),
        SearchQuery::Prefix(_) => true,
        SearchQuery::And(parts) | SearchQuery::Or(parts) => parts.iter().any(query_has_fts),
        SearchQuery::Not(child) => query_has_fts(child),
        // Every other node kind bypasses the FTS index entirely.
        SearchQuery::Fuzzy(_)
        | SearchQuery::FieldMatch { .. }
        | SearchQuery::TypeFilter(_)
        | SearchQuery::TagFilter(_)
        | SearchQuery::RangeQuery { .. }
        | SearchQuery::CompareQuery { .. }
        | SearchQuery::DateQuery { .. } => false,
    }
}
/// Locate the 1-based SQL parameter index of the first `FullText` (or
/// `Prefix`) query parameter, so the same text can be handed to
/// `ts_rank` for relevance sorting.
///
/// The traversal mirrors the parameter layout emitted by
/// `build_search_clause`; the per-variant increments below must stay in
/// lockstep with that function. Returns 1 as a fallback when no FTS node
/// exists (callers only use the result when `query_has_fts` is true, so
/// the fallback should be unreachable).
fn find_first_fts_param(query: &SearchQuery) -> i32 {
    fn walk(node: &SearchQuery, next_param: &mut i32) -> Option<i32> {
        match node {
            // An empty FullText term binds nothing and cannot be ranked.
            SearchQuery::FullText(text) if text.is_empty() => None,
            SearchQuery::FullText(_) => {
                let found = *next_param;
                // FullText binds 7 params: fts, prefix, ilike, plus four
                // similarity terms (title/artist/album/filename).
                *next_param += 7;
                Some(found)
            }
            SearchQuery::Prefix(_) => {
                let found = *next_param;
                *next_param += 1;
                Some(found)
            }
            SearchQuery::Fuzzy(_) => {
                // Fuzzy binds 5 params: four similarity terms + ILIKE.
                *next_param += 5;
                None
            }
            // Both of these bind exactly one parameter.
            SearchQuery::FieldMatch { .. } | SearchQuery::CompareQuery { .. } => {
                *next_param += 1;
                None
            }
            SearchQuery::TypeFilter(_) | SearchQuery::TagFilter(_) => None,
            SearchQuery::RangeQuery { start, end, .. } => {
                // One parameter per bound that is actually present.
                *next_param += i32::from(start.is_some()) + i32::from(end.is_some());
                None
            }
            // DateQuery is rendered as inline SQL and binds nothing.
            SearchQuery::DateQuery { .. } => None,
            SearchQuery::And(parts) | SearchQuery::Or(parts) => {
                parts.iter().find_map(|part| walk(part, next_param))
            }
            SearchQuery::Not(child) => walk(child, next_param),
        }
    }
    let mut next_param = 1;
    walk(query, &mut next_param).unwrap_or(1)
}
/// Decode a query row into a [`crate::model::MarkdownLink`].
///
/// Expected column order: id, source media id, target path, optional
/// resolved target id, link type, optional link text, optional line
/// number, optional context snippet, created-at. Ids are stored as text
/// and parsed back into UUIDs; a malformed resolved-target id degrades
/// to `None` (treated as unresolved), while a malformed primary or
/// source id is a hard database error.
fn row_to_markdown_link(row: &Row) -> Result<crate::model::MarkdownLink> {
    let parse_uuid =
        |text: &str| Uuid::parse_str(text).map_err(|e| PinakesError::Database(e.to_string()));

    let raw_id: String = row.get(0);
    let raw_source: String = row.get(1);
    let target_path: String = row.get(2);
    let raw_target: Option<String> = row.get(3);
    let raw_link_type: String = row.get(4);

    Ok(crate::model::MarkdownLink {
        id: parse_uuid(&raw_id)?,
        source_media_id: MediaId(parse_uuid(&raw_source)?),
        target_path,
        // An unparseable target id is treated as "not yet resolved".
        target_media_id: raw_target
            .as_deref()
            .and_then(|s| Uuid::parse_str(s).ok())
            .map(MediaId),
        // Unknown link-type strings default to a wikilink.
        link_type: raw_link_type
            .parse()
            .unwrap_or(crate::model::LinkType::Wikilink),
        link_text: row.get(5),
        line_number: row.get(6),
        context: row.get(7),
        created_at: row.get(8),
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_media_type_roundtrip() {
        use crate::media_type::BuiltinMediaType;
        // Serialize a builtin type and parse it back to the same value.
        let original = MediaType::Builtin(BuiltinMediaType::Mp3);
        let encoded = media_type_to_string(&original);
        assert_eq!(encoded, "mp3");
        assert_eq!(media_type_from_string(&encoded).unwrap(), original);
    }

    #[test]
    fn test_audit_action_roundtrip() {
        let original = AuditAction::AddedToCollection;
        let encoded = audit_action_to_string(&original);
        assert_eq!(encoded, "added_to_collection");
        assert_eq!(audit_action_from_string(&encoded).unwrap(), original);
    }

    #[test]
    fn test_collection_kind_roundtrip() {
        let original = CollectionKind::Virtual;
        let encoded = collection_kind_to_string(&original);
        assert_eq!(encoded, "virtual");
        assert_eq!(collection_kind_from_string(&encoded).unwrap(), original);
    }

    #[test]
    fn test_custom_field_type_roundtrip() {
        let original = CustomFieldType::Boolean;
        let encoded = custom_field_type_to_string(&original);
        assert_eq!(encoded, "boolean");
        assert_eq!(custom_field_type_from_string(&encoded).unwrap(), original);
    }

    #[test]
    fn test_build_search_fulltext() {
        let query = SearchQuery::FullText("hello world".into());
        let mut next_param = 1;
        let mut params: Vec<Box<dyn ToSql + Sync + Send>> = Vec::new();
        let (clause, types, tags) =
            build_search_clause(&query, &mut next_param, &mut params).unwrap();
        // A FullText node expands into combined FTS, prefix, ILIKE and
        // trigram-similarity predicates.
        for needle in ["plainto_tsquery", "to_tsquery", "LIKE", "similarity"] {
            assert!(clause.contains(needle));
        }
        assert!(types.is_empty());
        assert!(tags.is_empty());
        // Seven placeholders consumed, so the next index is 8.
        assert_eq!(next_param, 8);
    }

    #[test]
    fn test_build_search_and() {
        let query = SearchQuery::And(vec![
            SearchQuery::FullText("foo".into()),
            SearchQuery::TypeFilter("pdf".into()),
        ]);
        let mut next_param = 1;
        let mut params: Vec<Box<dyn ToSql + Sync + Send>> = Vec::new();
        let (clause, types, _tags) =
            build_search_clause(&query, &mut next_param, &mut params).unwrap();
        assert!(clause.contains("AND"));
        assert_eq!(types, vec!["pdf"]);
    }

    #[test]
    fn test_query_has_fts() {
        // Non-empty FullText and any Prefix use the FTS index.
        assert!(query_has_fts(&SearchQuery::FullText("test".into())));
        assert!(query_has_fts(&SearchQuery::Prefix("te".into())));
        // Empty FullText and Fuzzy do not.
        assert!(!query_has_fts(&SearchQuery::FullText(String::new())));
        assert!(!query_has_fts(&SearchQuery::Fuzzy("test".into())));
        // One FTS child anywhere in a composite is enough.
        assert!(query_has_fts(&SearchQuery::And(vec![
            SearchQuery::Fuzzy("x".into()),
            SearchQuery::FullText("y".into()),
        ])));
    }

    #[test]
    fn test_sort_order_clause() {
        assert_eq!(sort_order_clause(&SortOrder::DateAsc), "created_at ASC");
        assert_eq!(sort_order_clause(&SortOrder::NameDesc), "file_name DESC");
        assert_eq!(sort_order_clause(&SortOrder::SizeAsc), "file_size ASC");
    }
}