pinakes/crates/pinakes-core/src/storage/sqlite.rs
NotAShelf aa68d742c9
pinakes-core: fix minor clippy warnings; add toggle for Swagger UI generation
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ie33a5d17b774289023e3855789d3adc86a6a6964
2026-03-22 22:04:49 +03:00

8736 lines
286 KiB
Rust

use std::{
path::{Path, PathBuf},
sync::{Arc, Mutex},
};
use chrono::{DateTime, NaiveDateTime, Utc};
use rusqlite::{Connection, Row, params};
use rustc_hash::FxHashMap;
use uuid::Uuid;
use crate::{
error::{PinakesError, Result},
media_type::MediaType,
model::{
AuditAction,
AuditEntry,
Collection,
CollectionKind,
ContentHash,
CustomField,
CustomFieldType,
ManagedBlob,
ManagedStorageStats,
MediaId,
MediaItem,
Pagination,
Tag,
},
search::{SearchQuery, SearchRequest, SearchResults, SortOrder},
storage::StorageBackend,
};
/// Parse a UUID string from the database, returning a proper error on
/// corruption.
fn parse_uuid(s: &str) -> rusqlite::Result<Uuid> {
Uuid::parse_str(s).map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
0,
rusqlite::types::Type::Text,
Box::new(e),
)
})
}
/// `SQLite` storage backend using WAL mode for concurrent reads.
///
/// All async trait methods delegate to `tokio::task::spawn_blocking` because
/// `rusqlite::Connection` is synchronous. The connection is wrapped in an
/// `Arc<Mutex<_>>` so it can be shared across tasks safely.
pub struct SqliteBackend {
    // Single shared connection; every operation locks this mutex for the
    // duration of its blocking closure.
    conn: Arc<Mutex<Connection>>,
}
impl SqliteBackend {
    /// Open (or create) a database at the given file path.
    ///
    /// # Errors
    ///
    /// Returns an error if the database cannot be opened or configured.
    pub fn new(path: &Path) -> Result<Self> {
        Self::configure(Connection::open(path)?)
    }

    /// Create an in-memory database -- useful for tests.
    ///
    /// # Errors
    ///
    /// Returns an error if the in-memory database cannot be created or
    /// configured.
    pub fn in_memory() -> Result<Self> {
        Self::configure(Connection::open_in_memory()?)
    }

    /// Apply connection-wide pragmas and wrap the handle for shared use.
    fn configure(conn: Connection) -> Result<Self> {
        // WAL enables concurrent readers; foreign-key enforcement is off by
        // default in SQLite and must be enabled per connection.
        conn.execute_batch("PRAGMA journal_mode = WAL; PRAGMA foreign_keys = ON;")?;
        let conn = Arc::new(Mutex::new(conn));
        Ok(Self { conn })
    }
}
/// Parse a stored datetime string into a UTC timestamp.
///
/// Accepts RFC 3339 first (carries a timezone), then two naive legacy
/// formats (assumed UTC). An unparseable value logs a warning and falls
/// back to the Unix epoch rather than failing the row.
fn parse_datetime(s: &str) -> DateTime<Utc> {
    if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
        return dt.with_timezone(&Utc);
    }
    // Older rows may have been written without timezone information.
    for fmt in ["%Y-%m-%dT%H:%M:%S%.f", "%Y-%m-%d %H:%M:%S"] {
        if let Ok(naive) = NaiveDateTime::parse_from_str(s, fmt) {
            return naive.and_utc();
        }
    }
    // Last resort -- epoch
    tracing::warn!(value = %s, "failed to parse datetime, falling back to epoch");
    DateTime::default()
}
/// Decode the bare lowercase media-type string stored in the database.
///
/// `MediaType` derives Serialize/Deserialize with serde
/// `rename_all = "lowercase"`, so a JSON round-trip uses e.g. `"mp3"`. The
/// database stores the unquoted form, so quotes are re-added before handing
/// the value to `serde_json`. Unknown values degrade to plain text rather
/// than failing the row.
fn parse_media_type(s: &str) -> MediaType {
    use crate::media_type::BuiltinMediaType;
    serde_json::from_str(&format!("\"{s}\""))
        .unwrap_or(MediaType::Builtin(BuiltinMediaType::PlainText))
}
/// Encode a `MediaType` as the bare lowercase string stored in the database.
fn media_type_to_str(mt: &MediaType) -> String {
    // serde produces a quoted JSON string (e.g. `"mp3"`); strip the quotes.
    serde_json::to_string(mt)
        .unwrap_or_else(|_| "\"plaintext\"".to_string())
        .trim_matches('"')
        .to_string()
}
/// Map a full `media_items` row onto a `MediaItem`.
///
/// Column lookups are by name. Columns added by later migrations (photo
/// metadata, managed-storage fields, trash/link timestamps) are read with
/// `.ok().flatten()` so queries that do not SELECT them still map cleanly;
/// absent values become `None` (or the type's default for `storage_mode`).
/// Custom fields are NOT loaded here -- callers populate them separately.
fn row_to_media_item(row: &Row) -> rusqlite::Result<MediaItem> {
    let id_str: String = row.get("id")?;
    let path_str: String = row.get("path")?;
    let media_type_str: String = row.get("media_type")?;
    let hash_str: String = row.get("content_hash")?;
    let created_str: String = row.get("created_at")?;
    let updated_str: String = row.get("updated_at")?;
    Ok(MediaItem {
        // A malformed UUID here is database corruption and fails the row.
        id: MediaId(parse_uuid(&id_str)?),
        path: PathBuf::from(path_str),
        file_name: row.get("file_name")?,
        media_type: parse_media_type(&media_type_str),
        content_hash: ContentHash(hash_str),
        // Stored as a signed SQLite INTEGER; reinterpret the bits as u64.
        file_size: row.get::<_, i64>("file_size")?.cast_unsigned(),
        title: row.get("title")?,
        artist: row.get("artist")?,
        album: row.get("album")?,
        genre: row.get("genre")?,
        year: row.get("year")?,
        duration_secs: row.get("duration_secs")?,
        description: row.get("description")?,
        thumbnail_path: row
            .get::<_, Option<String>>("thumbnail_path")?
            .map(PathBuf::from),
        custom_fields: FxHashMap::default(), // loaded separately
        // file_mtime may not be present in all queries, so handle gracefully
        file_mtime: row.get::<_, Option<i64>>("file_mtime").unwrap_or(None),
        // Photo-specific fields (may not be present in all queries)
        date_taken: row
            .get::<_, Option<String>>("date_taken")
            .ok()
            .flatten()
            .and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
            .map(|dt| dt.with_timezone(&Utc)),
        latitude: row.get::<_, Option<f64>>("latitude").ok().flatten(),
        longitude: row.get::<_, Option<f64>>("longitude").ok().flatten(),
        camera_make: row.get::<_, Option<String>>("camera_make").ok().flatten(),
        camera_model: row
            .get::<_, Option<String>>("camera_model")
            .ok()
            .flatten(),
        rating: row.get::<_, Option<i32>>("rating").ok().flatten(),
        perceptual_hash: row
            .get::<_, Option<String>>("perceptual_hash")
            .ok()
            .flatten(),
        // Managed storage fields (may not be present in all queries)
        storage_mode: row
            .get::<_, Option<String>>("storage_mode")
            .ok()
            .flatten()
            // Unparseable mode strings silently fall back to the default.
            .and_then(|s| s.parse().ok())
            .unwrap_or_default(),
        original_filename: row
            .get::<_, Option<String>>("original_filename")
            .ok()
            .flatten(),
        uploaded_at: row
            .get::<_, Option<String>>("uploaded_at")
            .ok()
            .flatten()
            .and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
            .map(|dt| dt.with_timezone(&Utc)),
        storage_key: row
            .get::<_, Option<String>>("storage_key")
            .ok()
            .flatten(),
        created_at: parse_datetime(&created_str),
        updated_at: parse_datetime(&updated_str),
        // Trash support
        deleted_at: row
            .get::<_, Option<String>>("deleted_at")
            .ok()
            .flatten()
            .and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
            .map(|dt| dt.with_timezone(&Utc)),
        // Markdown links extraction timestamp
        links_extracted_at: row
            .get::<_, Option<String>>("links_extracted_at")
            .ok()
            .flatten()
            .and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
            .map(|dt| dt.with_timezone(&Utc)),
    })
}
/// Map a `tags` row onto a `Tag`.
fn row_to_tag(row: &Row) -> rusqlite::Result<Tag> {
    let id: String = row.get("id")?;
    let parent: Option<String> = row.get("parent_id")?;
    let created: String = row.get("created_at")?;
    Ok(Tag {
        id: parse_uuid(&id)?,
        name: row.get("name")?,
        // A corrupt parent reference degrades to "no parent" instead of
        // failing the whole row.
        parent_id: parent.as_deref().and_then(|s| Uuid::parse_str(s).ok()),
        created_at: parse_datetime(&created),
    })
}
/// Map a `collections` row onto a `Collection`.
fn row_to_collection(row: &Row) -> rusqlite::Result<Collection> {
    let id: String = row.get("id")?;
    let kind: String = row.get("kind")?;
    let created: String = row.get("created_at")?;
    let updated: String = row.get("updated_at")?;
    Ok(Collection {
        id: parse_uuid(&id)?,
        name: row.get("name")?,
        description: row.get("description")?,
        // Anything other than "virtual" (including corrupt values) is
        // treated as a manual collection.
        kind: if kind == "virtual" {
            CollectionKind::Virtual
        } else {
            CollectionKind::Manual
        },
        filter_query: row.get("filter_query")?,
        created_at: parse_datetime(&created),
        updated_at: parse_datetime(&updated),
    })
}
/// Map an `audit_log` row onto an `AuditEntry`.
///
/// The stored action string is matched against every known variant; an
/// unrecognized value falls back to `AuditAction::Updated` rather than
/// failing the row, so old logs survive enum changes.
fn row_to_audit_entry(row: &Row) -> rusqlite::Result<AuditEntry> {
    let id_str: String = row.get("id")?;
    let media_id_str: Option<String> = row.get("media_id")?;
    let action_str: String = row.get("action")?;
    let ts_str: String = row.get("timestamp")?;
    let action = match action_str.as_str() {
        "imported" => AuditAction::Imported,
        "updated" => AuditAction::Updated,
        "deleted" => AuditAction::Deleted,
        "tagged" => AuditAction::Tagged,
        "untagged" => AuditAction::Untagged,
        "added_to_collection" => AuditAction::AddedToCollection,
        "removed_from_collection" => AuditAction::RemovedFromCollection,
        "opened" => AuditAction::Opened,
        "scanned" => AuditAction::Scanned,
        "login_success" => AuditAction::LoginSuccess,
        "login_failed" => AuditAction::LoginFailed,
        "logout" => AuditAction::Logout,
        "session_expired" => AuditAction::SessionExpired,
        "permission_denied" => AuditAction::PermissionDenied,
        "role_changed" => AuditAction::RoleChanged,
        "library_access_granted" => AuditAction::LibraryAccessGranted,
        "library_access_revoked" => AuditAction::LibraryAccessRevoked,
        "user_created" => AuditAction::UserCreated,
        "user_updated" => AuditAction::UserUpdated,
        "user_deleted" => AuditAction::UserDeleted,
        "plugin_installed" => AuditAction::PluginInstalled,
        "plugin_uninstalled" => AuditAction::PluginUninstalled,
        "plugin_enabled" => AuditAction::PluginEnabled,
        "plugin_disabled" => AuditAction::PluginDisabled,
        "config_changed" => AuditAction::ConfigChanged,
        "root_directory_added" => AuditAction::RootDirectoryAdded,
        "root_directory_removed" => AuditAction::RootDirectoryRemoved,
        "share_link_created" => AuditAction::ShareLinkCreated,
        "share_link_accessed" => AuditAction::ShareLinkAccessed,
        "database_vacuumed" => AuditAction::DatabaseVacuumed,
        "database_cleared" => AuditAction::DatabaseCleared,
        "export_completed" => AuditAction::ExportCompleted,
        "integrity_check_completed" => AuditAction::IntegrityCheckCompleted,
        // Unknown action strings (e.g. from a newer schema) degrade to
        // Updated instead of erroring.
        _ => AuditAction::Updated,
    };
    Ok(AuditEntry {
        id: parse_uuid(&id_str)?,
        // A corrupt media reference is dropped (None) rather than fatal.
        media_id: media_id_str.and_then(|s| Uuid::parse_str(&s).ok().map(MediaId)),
        action,
        details: row.get("details")?,
        timestamp: parse_datetime(&ts_str),
    })
}
/// Encode a `CollectionKind` as the string stored in the `kind` column.
const fn collection_kind_to_str(kind: CollectionKind) -> &'static str {
    match kind {
        CollectionKind::Manual => "manual",
        CollectionKind::Virtual => "virtual",
    }
}
/// Encode a `CustomFieldType` as the string stored in the `field_type`
/// column. Inverse of `str_to_custom_field_type`.
const fn custom_field_type_to_str(ft: CustomFieldType) -> &'static str {
    match ft {
        CustomFieldType::Text => "text",
        CustomFieldType::Number => "number",
        CustomFieldType::Date => "date",
        CustomFieldType::Boolean => "boolean",
    }
}
/// Decode a stored `field_type` string; unknown values default to `Text`.
fn str_to_custom_field_type(s: &str) -> CustomFieldType {
    match s {
        "number" => CustomFieldType::Number,
        "date" => CustomFieldType::Date,
        "boolean" => CustomFieldType::Boolean,
        // "text" and anything unrecognized
        _ => CustomFieldType::Text,
    }
}
/// Load a user's profile row, or a default profile when none exists.
fn load_user_profile_sync(
    db: &Connection,
    user_id_str: &str,
) -> rusqlite::Result<crate::users::UserProfile> {
    let query = db.query_row(
        "SELECT avatar_path, bio, preferences_json FROM user_profiles WHERE \
         user_id = ?",
        [user_id_str],
        |row| {
            let avatar_path: Option<String> = row.get(0)?;
            let bio: Option<String> = row.get(1)?;
            let prefs_str: String = row.get(2)?;
            // Unreadable preferences JSON degrades to defaults instead of
            // failing the lookup.
            let preferences: crate::users::UserPreferences =
                serde_json::from_str(&prefs_str).unwrap_or_default();
            Ok(crate::users::UserProfile {
                avatar_path,
                bio,
                preferences,
            })
        },
    );
    match query {
        // No profile row yet -- treat as an empty/default profile.
        Err(rusqlite::Error::QueryReturnedNoRows) => {
            Ok(crate::users::UserProfile {
                avatar_path: None,
                bio: None,
                preferences: crate::users::UserPreferences::default(),
            })
        },
        other => other,
    }
}
/// Load all custom fields for a single media item, keyed by field name.
fn load_custom_fields_sync(
    db: &Connection,
    media_id: MediaId,
) -> rusqlite::Result<FxHashMap<String, CustomField>> {
    let mut stmt = db.prepare(
        "SELECT field_name, field_type, field_value FROM custom_fields WHERE \
         media_id = ?1",
    )?;
    // Collect directly into the map; the first row error short-circuits.
    stmt
        .query_map(params![media_id.0.to_string()], |row| {
            let name: String = row.get(0)?;
            let ft_str: String = row.get(1)?;
            let value: String = row.get(2)?;
            let field = CustomField {
                field_type: str_to_custom_field_type(&ft_str),
                value,
            };
            Ok((name, field))
        })?
        .collect()
}
/// Attach custom fields to every item in `items` using batched
/// `IN (...)` queries.
///
/// IDs are processed in chunks so the number of bind parameters per
/// statement stays below SQLite's host-parameter limit
/// (`SQLITE_MAX_VARIABLE_NUMBER`, historically 999): the previous
/// single-statement form failed with "too many SQL variables" for large
/// result sets.
fn load_custom_fields_batch(
    db: &Connection,
    items: &mut [MediaItem],
) -> rusqlite::Result<()> {
    if items.is_empty() {
        return Ok(());
    }
    // Conservative bound that is safe on all common SQLite builds.
    const CHUNK: usize = 500;
    let ids: Vec<String> = items.iter().map(|i| i.id.0.to_string()).collect();
    let mut fields_map: FxHashMap<String, FxHashMap<String, CustomField>> =
        FxHashMap::default();
    for chunk in ids.chunks(CHUNK) {
        let placeholders: Vec<String> =
            (1..=chunk.len()).map(|i| format!("?{i}")).collect();
        let sql = format!(
            "SELECT media_id, field_name, field_type, field_value FROM \
             custom_fields WHERE media_id IN ({})",
            placeholders.join(", ")
        );
        let mut stmt = db.prepare(&sql)?;
        let params: Vec<&dyn rusqlite::types::ToSql> = chunk
            .iter()
            .map(|s| s as &dyn rusqlite::types::ToSql)
            .collect();
        let rows = stmt.query_map(params.as_slice(), |row| {
            let mid_str: String = row.get(0)?;
            let name: String = row.get(1)?;
            let ft_str: String = row.get(2)?;
            let value: String = row.get(3)?;
            Ok((mid_str, name, ft_str, value))
        })?;
        for r in rows {
            let (mid_str, name, ft_str, value) = r?;
            fields_map
                .entry(mid_str)
                .or_default()
                .insert(name, CustomField {
                    field_type: str_to_custom_field_type(&ft_str),
                    value,
                });
        }
    }
    // Move each item's field map into place; items with no custom fields
    // keep their (empty) default map.
    for item in items.iter_mut() {
        if let Some(fields) = fields_map.remove(&item.id.0.to_string()) {
            item.custom_fields = fields;
        }
    }
    Ok(())
}
/// Translate a `SearchQuery` into components that can be assembled into SQL.
///
/// Returns `(fts_expr, like_terms, where_clauses, join_clauses, params)`
/// where:
/// - `fts_expr` is an FTS5 MATCH expression (may be empty),
/// - `like_terms` are search terms for LIKE fallback matching,
/// - `where_clauses` are extra WHERE predicates (e.g. type filters),
/// - `join_clauses` are extra JOIN snippets (e.g. tag filters),
/// - `params` are bind values for the `?` placeholders in `where_clauses`
///   and `join_clauses`.
fn search_query_to_fts(
    query: &SearchQuery,
) -> (String, Vec<String>, Vec<String>, Vec<String>, Vec<String>) {
    let mut where_clauses = Vec::new();
    let mut join_clauses = Vec::new();
    let mut bind_params = Vec::new();
    let mut like_terms = Vec::new();
    let fts_expr = build_fts_expr(
        query,
        &mut where_clauses,
        &mut join_clauses,
        &mut bind_params,
        &mut like_terms,
    );
    (fts_expr, like_terms, where_clauses, join_clauses, bind_params)
}
/// Recursively translate `query` into an FTS5 MATCH expression.
///
/// Pure SQL constraints (type/tag/range/compare/date filters) contribute
/// nothing to the returned expression; instead they push predicates into
/// `wheres`/`joins` with their bind values appended to `params`. Raw text
/// terms are additionally collected into `like_terms` for LIKE fallback
/// matching. Returns an empty string when the node has no FTS component.
fn build_fts_expr(
    query: &SearchQuery,
    wheres: &mut Vec<String>,
    joins: &mut Vec<String>,
    params: &mut Vec<String>,
    like_terms: &mut Vec<String>,
) -> String {
    match query {
        SearchQuery::FullText(text) => {
            if text.is_empty() {
                String::new()
            } else {
                // Collect term for LIKE fallback matching
                like_terms.push(text.clone());
                let sanitized = sanitize_fts_token(text);
                // Single-word queries get implicit prefix matching so that
                // e.g. "mus" also matches "music" and "musician".
                //
                // Bug fix: the whitespace test must run against the *raw*
                // text. `sanitize_fts_token` always wraps its result in
                // double quotes, so the old `!sanitized.contains('"')`
                // check was always false and prefix matching never fired.
                if !text.contains(char::is_whitespace) {
                    format!("{sanitized}*")
                } else {
                    // Phrases are matched as-is.
                    sanitized
                }
            }
        },
        SearchQuery::Prefix(prefix) => {
            like_terms.push(prefix.clone());
            format!("{}*", sanitize_fts_token(prefix))
        },
        SearchQuery::Fuzzy(term) => {
            // FTS5 does not natively support fuzzy; use prefix match
            // as a best-effort approximation.
            like_terms.push(term.clone());
            format!("{}*", sanitize_fts_token(term))
        },
        SearchQuery::FieldMatch { field, value } => {
            // FTS5 column filter syntax: `column:term`
            let safe_field = sanitize_fts_token(field);
            let safe_value = sanitize_fts_token(value);
            format!("{safe_field}:{safe_value}")
        },
        SearchQuery::Not(inner) => {
            let inner_expr = build_fts_expr(inner, wheres, joins, params, like_terms);
            if inner_expr.is_empty() {
                String::new()
            } else {
                format!("NOT {inner_expr}")
            }
        },
        SearchQuery::And(terms) => {
            // Adjacent terms are an implicit AND in FTS5.
            let parts: Vec<String> = terms
                .iter()
                .map(|t| build_fts_expr(t, wheres, joins, params, like_terms))
                .filter(|s| !s.is_empty())
                .collect();
            parts.join(" ")
        },
        SearchQuery::Or(terms) => {
            let parts: Vec<String> = terms
                .iter()
                .map(|t| build_fts_expr(t, wheres, joins, params, like_terms))
                .filter(|s| !s.is_empty())
                .collect();
            if parts.len() <= 1 {
                parts.into_iter().next().unwrap_or_default()
            } else {
                format!("({})", parts.join(" OR "))
            }
        },
        SearchQuery::TypeFilter(type_name) => {
            wheres.push("m.media_type = ?".to_string());
            params.push(type_name.clone());
            String::new()
        },
        SearchQuery::TagFilter(tag_name) => {
            // Use a unique alias per tag join to allow multiple tag filters.
            let alias_idx = joins.len();
            let alias_mt = format!("mt{alias_idx}");
            let alias_t = format!("t{alias_idx}");
            joins.push(format!(
                "JOIN media_tags {alias_mt} ON {alias_mt}.media_id = m.id JOIN tags \
                 {alias_t} ON {alias_t}.id = {alias_mt}.tag_id AND {alias_t}.name = ?",
            ));
            params.push(tag_name.clone());
            String::new()
        },
        SearchQuery::RangeQuery { field, start, end } => {
            let col = match field.as_str() {
                "year" => "m.year",
                "size" | "file_size" => "m.file_size",
                "duration" => "m.duration_secs",
                _ => return String::new(), // Unknown field, ignore
            };
            match (start, end) {
                (Some(s), Some(e)) => {
                    wheres.push(format!("{col} >= ? AND {col} <= ?"));
                    params.push(s.to_string());
                    params.push(e.to_string());
                },
                (Some(s), None) => {
                    wheres.push(format!("{col} >= ?"));
                    params.push(s.to_string());
                },
                (None, Some(e)) => {
                    wheres.push(format!("{col} <= ?"));
                    params.push(e.to_string());
                },
                (None, None) => {},
            }
            String::new()
        },
        SearchQuery::CompareQuery { field, op, value } => {
            let col = match field.as_str() {
                "year" => "m.year",
                "size" | "file_size" => "m.file_size",
                "duration" => "m.duration_secs",
                _ => return String::new(), // Unknown field, ignore
            };
            let op_sql = match op {
                crate::search::CompareOp::GreaterThan => ">",
                crate::search::CompareOp::GreaterOrEqual => ">=",
                crate::search::CompareOp::LessThan => "<",
                crate::search::CompareOp::LessOrEqual => "<=",
            };
            wheres.push(format!("{col} {op_sql} ?"));
            params.push(value.to_string());
            String::new()
        },
        SearchQuery::DateQuery { field, value } => {
            let col = match field.as_str() {
                "created" => "m.created_at",
                "modified" | "updated" => "m.updated_at",
                _ => return String::new(),
            };
            let sql = date_value_to_sqlite_expr(col, value);
            if !sql.is_empty() {
                wheres.push(sql);
            }
            String::new()
        },
    }
}
/// Convert a `DateValue` to a `SQLite` datetime comparison expression.
///
/// `col` is a trusted column reference chosen by the caller (never user
/// input); the returned string is interpolated directly into SQL.
fn date_value_to_sqlite_expr(
    col: &str,
    value: &crate::search::DateValue,
) -> String {
    use crate::search::DateValue;
    match value {
        DateValue::Today => format!("date({col}) = date('now')"),
        DateValue::Yesterday => format!("date({col}) = date('now', '-1 day')"),
        // 'weekday 0' advances to the next Sunday, so "this week" is the
        // seven days leading up to it.
        DateValue::ThisWeek => {
            format!("{col} >= datetime('now', 'weekday 0', '-7 days')")
        },
        DateValue::LastWeek => {
            format!(
                "{col} >= datetime('now', 'weekday 0', '-14 days') AND {col} < \
                 datetime('now', 'weekday 0', '-7 days')"
            )
        },
        DateValue::ThisMonth => {
            format!("{col} >= datetime('now', 'start of month')")
        },
        DateValue::LastMonth => {
            format!(
                "{col} >= datetime('now', 'start of month', '-1 month') AND {col} < \
                 datetime('now', 'start of month')"
            )
        },
        DateValue::ThisYear => format!("{col} >= datetime('now', 'start of year')"),
        DateValue::LastYear => {
            format!(
                "{col} >= datetime('now', 'start of year', '-1 year') AND {col} < \
                 datetime('now', 'start of year')"
            )
        },
        DateValue::DaysAgo(days) => {
            format!("{col} >= datetime('now', '-{days} days')")
        },
    }
}
/// Sanitize a string for safe embedding in an FTS5 query expression.
///
/// Everything that is not alphanumeric, `_`, or a space is stripped
/// (this removes control characters, quotes, and all FTS5 operator
/// syntax), then the result is wrapped in double quotes so FTS5 treats it
/// as a single string token. Because the filter already removes `"`
/// characters, no quote escaping is needed -- the previous
/// `replace('"', "\"\"")` and separate `is_control` filter were dead code.
fn sanitize_fts_token(s: &str) -> String {
    let cleaned: String = s
        .chars()
        .filter(|c| c.is_alphanumeric() || *c == '_' || *c == ' ')
        .collect();
    format!("\"{cleaned}\"")
}
/// Map a `SortOrder` to an ORDER BY fragment over the `m` (media) alias.
const fn sort_order_to_sql(sort: SortOrder) -> &'static str {
    match sort {
        // FTS rank is not easily portable; use date for Relevance
        SortOrder::Relevance | SortOrder::DateDesc => "m.created_at DESC",
        SortOrder::DateAsc => "m.created_at ASC",
        SortOrder::NameAsc => "m.file_name ASC",
        SortOrder::NameDesc => "m.file_name DESC",
        SortOrder::SizeAsc => "m.file_size ASC",
        SortOrder::SizeDesc => "m.file_size DESC",
    }
}
#[async_trait::async_trait]
impl StorageBackend for SqliteBackend {
// Migrations
/// Apply any pending schema migrations.
async fn run_migrations(&self) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let mut db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        // The migration runner takes `&mut Connection`.
        crate::storage::migrations::run_sqlite_migrations(&mut db)
    })
    .await
    // The outer map_err covers spawn_blocking failure (panic/cancel); the
    // trailing `?` unwraps the inner migration Result.
    .map_err(|e| PinakesError::Database(format!("run_migrations: {e}")))?
}
/// Register a library root directory; re-adding an existing path is a no-op.
async fn add_root_dir(&self, path: PathBuf) -> Result<()> {
    let path_display = path.display().to_string();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        // INSERT OR IGNORE makes duplicate registration harmless.
        db.execute(
            "INSERT OR IGNORE INTO root_dirs (path) VALUES (?1)",
            params![path.to_string_lossy().as_ref()],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| {
        PinakesError::Database(format!("add_root_dir {path_display}: {e}"))
    })?
}
/// List all registered root directories, sorted by path.
async fn list_root_dirs(&self) -> Result<Vec<PathBuf>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut stmt = db.prepare("SELECT path FROM root_dirs ORDER BY path")?;
        let dirs = stmt
            .query_map([], |row| row.get::<_, String>(0).map(PathBuf::from))?
            .collect::<rusqlite::Result<Vec<_>>>()?;
        drop(stmt);
        Ok(dirs)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("list_root_dirs: {e}")))?
}
/// Remove a registered root directory; removing an unknown path is a no-op.
async fn remove_root_dir(&self, path: &Path) -> Result<()> {
    let path = path.to_path_buf();
    let path_display = path.display().to_string();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        db.execute(
            "DELETE FROM root_dirs WHERE path = ?1",
            params![path.to_string_lossy().as_ref()],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| {
        PinakesError::Database(format!("remove_root_dir {path_display}: {e}"))
    })?
}
/// Insert a new media item.
///
/// Only the base and photo-metadata columns are written here; custom
/// fields, managed-storage columns, and trash/link timestamps are managed
/// by other operations (omitted columns take their schema defaults).
async fn insert_media(&self, item: &MediaItem) -> Result<()> {
    let item = item.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            db.execute(
                "INSERT INTO media_items (id, path, file_name, media_type, \
                 content_hash, file_size, title, artist, album, genre, year, \
                 duration_secs, description, thumbnail_path, file_mtime, \
                 date_taken, latitude, longitude, camera_make, camera_model, \
                 rating, perceptual_hash, created_at, updated_at) VALUES (?1, ?2, \
                 ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, \
                 ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)",
                // Parameter order must match the column list above exactly.
                params![
                    item.id.0.to_string(),
                    item.path.to_string_lossy().as_ref(),
                    item.file_name,
                    media_type_to_str(&item.media_type),
                    item.content_hash.0,
                    // u64 stored bit-for-bit as SQLite's signed INTEGER.
                    item.file_size.cast_signed(),
                    item.title,
                    item.artist,
                    item.album,
                    item.genre,
                    item.year,
                    item.duration_secs,
                    item.description,
                    item
                        .thumbnail_path
                        .as_ref()
                        .map(|p| p.to_string_lossy().to_string()),
                    item.file_mtime,
                    item.date_taken.as_ref().map(chrono::DateTime::to_rfc3339),
                    item.latitude,
                    item.longitude,
                    item.camera_make,
                    item.camera_model,
                    item.rating,
                    item.perceptual_hash,
                    item.created_at.to_rfc3339(),
                    item.updated_at.to_rfc3339(),
                ],
            )
            .map_err(crate::error::db_ctx("insert_media", &item.id))?;
        }
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Count media items, excluding trashed ones (`deleted_at` set).
async fn count_media(&self) -> Result<u64> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let count: i64 = db.query_row(
            "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL",
            [],
            |row| row.get(0),
        )?;
        drop(db);
        // COUNT(*) is never negative; reinterpret the bits as u64.
        Ok(count.cast_unsigned())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Fetch a single media item by id, including its custom fields.
///
/// Returns `PinakesError::NotFound` when no row matches.
async fn get_media(&self, id: MediaId) -> Result<MediaItem> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let item = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let mut stmt = db.prepare(
                "SELECT id, path, file_name, media_type, content_hash, file_size, \
                 title, artist, album, genre, year, duration_secs, description, \
                 thumbnail_path, file_mtime, date_taken, latitude, longitude, \
                 camera_make, camera_model, rating, perceptual_hash, storage_mode, \
                 original_filename, uploaded_at, storage_key, created_at, \
                 updated_at, deleted_at, links_extracted_at FROM media_items WHERE \
                 id = ?1",
            )?;
            // Distinguish "no such row" from other database errors.
            let mut item = stmt
                .query_row(params![id.0.to_string()], row_to_media_item)
                .map_err(|e| {
                    match e {
                        rusqlite::Error::QueryReturnedNoRows => {
                            PinakesError::NotFound(format!("media item {id}"))
                        },
                        other => PinakesError::from(other),
                    }
                })?;
            drop(stmt);
            // Custom fields live in a separate table, loaded after the row.
            item.custom_fields = load_custom_fields_sync(&db, item.id)?;
            drop(db);
            item
        };
        Ok(item)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Look up a media item by its content hash, including custom fields.
///
/// Returns `Ok(None)` when no item has the given hash.
async fn get_media_by_hash(
    &self,
    hash: &ContentHash,
) -> Result<Option<MediaItem>> {
    let hash = hash.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let result = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let mut stmt = db.prepare(
                "SELECT id, path, file_name, media_type, content_hash, file_size, \
                 title, artist, album, genre, year, duration_secs, description, \
                 thumbnail_path, file_mtime, date_taken, latitude, longitude, \
                 camera_make, camera_model, rating, perceptual_hash, storage_mode, \
                 original_filename, uploaded_at, storage_key, created_at, \
                 updated_at, deleted_at, links_extracted_at FROM media_items WHERE \
                 content_hash = ?1",
            )?;
            // `.optional()` turns QueryReturnedNoRows into Ok(None).
            let result = stmt
                .query_row(params![hash.0], row_to_media_item)
                .optional()?;
            drop(stmt);
            if let Some(mut item) = result {
                item.custom_fields = load_custom_fields_sync(&db, item.id)?;
                drop(db);
                Some(item)
            } else {
                drop(db);
                None
            }
        };
        Ok(result)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Look up a media item by its filesystem path, including custom fields.
///
/// Returns `Ok(None)` when no item has the given path. The path is
/// compared as a (lossily converted) string, exactly as stored.
async fn get_media_by_path(
    &self,
    path: &std::path::Path,
) -> Result<Option<MediaItem>> {
    let path_str = path.to_string_lossy().to_string();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let result = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let mut stmt = db.prepare(
                "SELECT id, path, file_name, media_type, content_hash, file_size, \
                 title, artist, album, genre, year, duration_secs, description, \
                 thumbnail_path, file_mtime, date_taken, latitude, longitude, \
                 camera_make, camera_model, rating, perceptual_hash, storage_mode, \
                 original_filename, uploaded_at, storage_key, created_at, \
                 updated_at, deleted_at, links_extracted_at FROM media_items WHERE \
                 path = ?1",
            )?;
            // `.optional()` turns QueryReturnedNoRows into Ok(None).
            let result = stmt
                .query_row(params![path_str], row_to_media_item)
                .optional()?;
            drop(stmt);
            if let Some(mut item) = result {
                item.custom_fields = load_custom_fields_sync(&db, item.id)?;
                drop(db);
                Some(item)
            } else {
                drop(db);
                None
            }
        };
        Ok(result)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// List non-trashed media items with pagination and optional sorting.
///
/// The sort key is mapped through a fixed whitelist, so user-supplied sort
/// strings can never be interpolated into the SQL.
async fn list_media(
    &self,
    pagination: &Pagination,
) -> Result<Vec<MediaItem>> {
    let pagination = pagination.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let rows = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let order_by = match pagination.sort.as_deref() {
                Some("created_at_asc") => "created_at ASC",
                Some("name_asc") => "file_name ASC",
                Some("name_desc") => "file_name DESC",
                Some("size_asc") => "file_size ASC",
                Some("size_desc") => "file_size DESC",
                Some("type_asc") => "media_type ASC",
                Some("type_desc") => "media_type DESC",
                // "created_at_desc" or any unrecognized value falls back to default
                _ => "created_at DESC",
            };
            let sql = format!(
                "SELECT id, path, file_name, media_type, content_hash, file_size, \
                 title, artist, album, genre, year, duration_secs, description, \
                 thumbnail_path, file_mtime, date_taken, latitude, longitude, \
                 camera_make, camera_model, rating, perceptual_hash, storage_mode, \
                 original_filename, uploaded_at, storage_key, created_at, \
                 updated_at, deleted_at, links_extracted_at FROM media_items WHERE \
                 deleted_at IS NULL ORDER BY {order_by} LIMIT ?1 OFFSET ?2"
            );
            let mut stmt = db.prepare(&sql)?;
            let mut rows = stmt
                .query_map(
                    params![
                        pagination.limit.cast_signed(),
                        pagination.offset.cast_signed()
                    ],
                    row_to_media_item,
                )?
                .collect::<rusqlite::Result<Vec<_>>>()?;
            drop(stmt);
            // One batched query for all pages' custom fields instead of a
            // per-item lookup.
            load_custom_fields_batch(&db, &mut rows)?;
            drop(db);
            rows
        };
        Ok(rows)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Update a media item's base and photo-metadata columns.
///
/// Managed-storage, trash, and link-extraction columns are intentionally
/// not part of the SET list; they are maintained by other operations.
/// Returns `PinakesError::NotFound` when the id matches no row.
async fn update_media(&self, item: &MediaItem) -> Result<()> {
    let item = item.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let changed = db
                .execute(
                    "UPDATE media_items SET path = ?2, file_name = ?3, media_type = \
                     ?4, content_hash = ?5, file_size = ?6, title = ?7, artist = ?8, \
                     album = ?9, genre = ?10, year = ?11, duration_secs = ?12, \
                     description = ?13, thumbnail_path = ?14, file_mtime = ?15, \
                     date_taken = ?16, latitude = ?17, longitude = ?18, camera_make = \
                     ?19, camera_model = ?20, rating = ?21, perceptual_hash = ?22, \
                     updated_at = ?23 WHERE id = ?1",
                    // ?1 is the id; the rest follow the SET list order.
                    params![
                        item.id.0.to_string(),
                        item.path.to_string_lossy().as_ref(),
                        item.file_name,
                        media_type_to_str(&item.media_type),
                        item.content_hash.0,
                        item.file_size.cast_signed(),
                        item.title,
                        item.artist,
                        item.album,
                        item.genre,
                        item.year,
                        item.duration_secs,
                        item.description,
                        item
                            .thumbnail_path
                            .as_ref()
                            .map(|p| p.to_string_lossy().to_string()),
                        item.file_mtime,
                        item.date_taken.as_ref().map(chrono::DateTime::to_rfc3339),
                        item.latitude,
                        item.longitude,
                        item.camera_make,
                        item.camera_model,
                        item.rating,
                        item.perceptual_hash,
                        item.updated_at.to_rfc3339(),
                    ],
                )
                .map_err(crate::error::db_ctx("update_media", &item.id))?;
            drop(db);
            // Zero affected rows means the item does not exist.
            if changed == 0 {
                return Err(PinakesError::NotFound(format!(
                    "media item {}",
                    item.id
                )));
            }
        }
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Permanently delete a media item row.
///
/// NOTE(review): this is a hard DELETE, not a move to trash -- soft
/// deletion (setting `deleted_at`) is presumably handled by a separate
/// operation; confirm against callers before changing.
/// Returns `PinakesError::NotFound` when the id matches no row.
async fn delete_media(&self, id: MediaId) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let changed = db
                .execute("DELETE FROM media_items WHERE id = ?1", params![
                    id.0.to_string()
                ])
                .map_err(crate::error::db_ctx("delete_media", id))?;
            drop(db);
            // Zero affected rows means the item does not exist.
            if changed == 0 {
                return Err(PinakesError::NotFound(format!("media item {id}")));
            }
        }
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Delete every media item and return how many rows were removed.
///
/// Uses the affected-row count returned by the DELETE itself, so counting
/// and deleting are a single atomic statement -- the previous separate
/// COUNT(*) query was redundant and racy relative to the DELETE.
async fn delete_all_media(&self) -> Result<u64> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        // `execute` reports the number of rows the DELETE removed.
        let deleted = db.execute("DELETE FROM media_items", [])?;
        drop(db);
        // usize -> u64 cannot truncate on supported platforms.
        Ok(u64::try_from(deleted).unwrap_or(u64::MAX))
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Create a new tag, optionally nested under `parent_id`.
///
/// The id is a time-ordered UUIDv7 generated here; the returned `Tag`
/// mirrors exactly what was inserted.
async fn create_tag(
    &self,
    name: &str,
    parent_id: Option<Uuid>,
) -> Result<Tag> {
    let name = name.to_string();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let tag = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let id = Uuid::now_v7();
            let now = Utc::now();
            db.execute(
                "INSERT INTO tags (id, name, parent_id, created_at) VALUES (?1, ?2, \
                 ?3, ?4)",
                params![
                    id.to_string(),
                    name,
                    parent_id.map(|p| p.to_string()),
                    now.to_rfc3339(),
                ],
            )
            .map_err(crate::error::db_ctx("create_tag", &name))?;
            drop(db);
            Tag {
                id,
                name,
                parent_id,
                created_at: now,
            }
        };
        Ok(tag)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Fetch a single tag by id.
///
/// Returns `PinakesError::TagNotFound` when no row matches.
async fn get_tag(&self, id: Uuid) -> Result<Tag> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let tag = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let mut stmt = db.prepare(
                "SELECT id, name, parent_id, created_at FROM tags WHERE id = ?1",
            )?;
            // Distinguish "no such row" from other database errors.
            let tag = stmt
                .query_row(params![id.to_string()], row_to_tag)
                .map_err(|e| {
                    match e {
                        rusqlite::Error::QueryReturnedNoRows => {
                            PinakesError::TagNotFound(id.to_string())
                        },
                        other => PinakesError::from(other),
                    }
                })?;
            drop(stmt);
            drop(db);
            tag
        };
        Ok(tag)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// List all tags, sorted by name.
async fn list_tags(&self) -> Result<Vec<Tag>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut stmt = db.prepare(
            "SELECT id, name, parent_id, created_at FROM tags ORDER BY name",
        )?;
        let tags = stmt
            .query_map([], row_to_tag)?
            .collect::<rusqlite::Result<Vec<_>>>()?;
        drop(stmt);
        Ok(tags)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Delete a tag by id.
///
/// Returns `PinakesError::TagNotFound` when the id matches no row.
async fn delete_tag(&self, id: Uuid) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let changed = db
            .execute("DELETE FROM tags WHERE id = ?1", params![id.to_string()])
            .map_err(crate::error::db_ctx("delete_tag", id))?;
        drop(db);
        // Zero affected rows means the tag never existed.
        if changed == 0 {
            return Err(PinakesError::TagNotFound(id.to_string()));
        }
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Attach a tag to a media item.
///
/// Uses `INSERT OR IGNORE`, so re-tagging an already-tagged item is a
/// harmless no-op rather than a constraint violation.
async fn tag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard
            .execute(
                "INSERT OR IGNORE INTO media_tags (media_id, tag_id) VALUES (?1, ?2)",
                params![media_id.0.to_string(), tag_id.to_string()],
            )
            .map_err(crate::error::db_ctx(
                "tag_media",
                format!("{media_id} x {tag_id}"),
            ))?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Remove a tag from a media item. Removing a tag that is not present is
/// a no-op (no row matched, no error raised).
async fn untag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard
            .execute(
                "DELETE FROM media_tags WHERE media_id = ?1 AND tag_id = ?2",
                params![media_id.0.to_string(), tag_id.to_string()],
            )
            .map_err(crate::error::db_ctx(
                "untag_media",
                format!("{media_id} x {tag_id}"),
            ))?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// List the tags attached to one media item, sorted by tag name.
async fn get_media_tags(&self, media_id: MediaId) -> Result<Vec<Tag>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut stmt = guard.prepare(
            "SELECT t.id, t.name, t.parent_id, t.created_at FROM tags t JOIN \
             media_tags mt ON mt.tag_id = t.id WHERE mt.media_id = ?1 ORDER BY \
             t.name",
        )?;
        let tags = stmt
            .query_map(params![media_id.0.to_string()], row_to_tag)?
            .collect::<rusqlite::Result<Vec<_>>>()?;
        Ok(tags)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Return every descendant of a tag (children, grandchildren, …) via a
/// recursive CTE, sorted by name. The starting tag itself is excluded.
async fn get_tag_descendants(&self, tag_id: Uuid) -> Result<Vec<Tag>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut stmt = guard.prepare(
            "WITH RECURSIVE descendants(id, name, parent_id, created_at) AS ( \
             SELECT id, name, parent_id, created_at FROM tags WHERE parent_id = \
             ?1 UNION ALL SELECT t.id, t.name, t.parent_id, t.created_at FROM \
             tags t JOIN descendants d ON t.parent_id = d.id ) SELECT id, name, \
             parent_id, created_at FROM descendants ORDER BY name",
        )?;
        let descendants = stmt
            .query_map(params![tag_id.to_string()], row_to_tag)?
            .collect::<rusqlite::Result<Vec<_>>>()?;
        Ok(descendants)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Insert a new collection row and return the constructed [`Collection`].
///
/// `created_at` and `updated_at` are both set to the same timestamp on
/// creation; the id is a freshly generated UUIDv7.
async fn create_collection(
    &self,
    name: &str,
    kind: CollectionKind,
    description: Option<&str>,
    filter_query: Option<&str>,
) -> Result<Collection> {
    let name = name.to_string();
    let description = description.map(|s| s.to_string());
    let filter_query = filter_query.map(|s| s.to_string());
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let id = Uuid::now_v7();
        let now = Utc::now();
        guard
            .execute(
                "INSERT INTO collections (id, name, description, kind, \
                 filter_query, created_at, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, \
                 ?6, ?7)",
                params![
                    id.to_string(),
                    name,
                    description,
                    collection_kind_to_str(kind),
                    filter_query,
                    now.to_rfc3339(),
                    now.to_rfc3339(),
                ],
            )
            .map_err(crate::error::db_ctx("create_collection", &name))?;
        // Release the connection before building the return value.
        drop(guard);
        Ok(Collection {
            id,
            name,
            description,
            kind,
            filter_query,
            created_at: now,
            updated_at: now,
        })
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
async fn get_collection(&self, id: Uuid) -> Result<Collection> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let collection = {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let mut stmt = db.prepare(
"SELECT id, name, description, kind, filter_query, created_at, \
updated_at FROM collections WHERE id = ?1",
)?;
let collection = stmt
.query_row(params![id.to_string()], row_to_collection)
.map_err(|e| {
match e {
rusqlite::Error::QueryReturnedNoRows => {
PinakesError::CollectionNotFound(id.to_string())
},
other => PinakesError::from(other),
}
})?;
drop(stmt);
drop(db);
collection
};
Ok(collection)
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Return every collection, sorted by name.
async fn list_collections(&self) -> Result<Vec<Collection>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut stmt = guard.prepare(
            "SELECT id, name, description, kind, filter_query, created_at, \
             updated_at FROM collections ORDER BY name",
        )?;
        let collections = stmt
            .query_map([], row_to_collection)?
            .collect::<rusqlite::Result<Vec<_>>>()?;
        Ok(collections)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Delete a collection by id; reports `CollectionNotFound` when no row
/// matched so callers can distinguish a delete from a no-op.
async fn delete_collection(&self, id: Uuid) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let affected = guard
            .execute(
                "DELETE FROM collections WHERE id = ?1",
                params![id.to_string()],
            )
            .map_err(crate::error::db_ctx("delete_collection", id))?;
        drop(guard);
        if affected == 0 {
            Err(PinakesError::CollectionNotFound(id.to_string()))
        } else {
            Ok(())
        }
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Add a media item to a collection at the given position.
///
/// `INSERT OR REPLACE` means re-adding an existing member simply updates
/// its position and `added_at` timestamp.
async fn add_to_collection(
    &self,
    collection_id: Uuid,
    media_id: MediaId,
    position: i32,
) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let added_at = Utc::now();
        guard
            .execute(
                "INSERT OR REPLACE INTO collection_members (collection_id, \
                 media_id, position, added_at) VALUES (?1, ?2, ?3, ?4)",
                params![
                    collection_id.to_string(),
                    media_id.0.to_string(),
                    position,
                    added_at.to_rfc3339(),
                ],
            )
            .map_err(crate::error::db_ctx(
                "add_to_collection",
                format!("{collection_id} <- {media_id}"),
            ))?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Remove a media item from a collection. Removing a non-member is a
/// no-op (no row matched, no error raised).
async fn remove_from_collection(
    &self,
    collection_id: Uuid,
    media_id: MediaId,
) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard
            .execute(
                "DELETE FROM collection_members WHERE collection_id = ?1 AND \
                 media_id = ?2",
                params![collection_id.to_string(), media_id.0.to_string()],
            )
            .map_err(crate::error::db_ctx(
                "remove_from_collection",
                format!("{collection_id} <- {media_id}"),
            ))?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// List the media items belonging to a collection, ordered by their
/// stored position, with custom fields attached in a single batch query.
async fn get_collection_members(
    &self,
    collection_id: Uuid,
) -> Result<Vec<MediaItem>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let mut stmt = guard.prepare(
            "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \
             m.file_size, m.title, m.artist, m.album, m.genre, m.year, \
             m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \
             m.date_taken, m.latitude, m.longitude, m.camera_make, \
             m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \
             m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \
             m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items \
             m JOIN collection_members cm ON cm.media_id = m.id WHERE \
             cm.collection_id = ?1 ORDER BY cm.position",
        )?;
        let mut members = stmt
            .query_map(params![collection_id.to_string()], row_to_media_item)?
            .collect::<rusqlite::Result<Vec<_>>>()?;
        // Statement must be finalized before the connection is reused below.
        drop(stmt);
        load_custom_fields_batch(&guard, &mut members)?;
        Ok(members)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Execute a search request against the media library.
///
/// Combines an optional FTS5 `MATCH` (full-text terms) with structured
/// filter WHERE/JOIN fragments produced by `search_query_to_fts`, then
/// runs two statements with the same filters: the paginated item query
/// and a `COUNT(*)` for the total. Bind-parameter order is significant:
/// filter params first, then the FTS expression, then LIMIT/OFFSET
/// (omitted for the count query).
async fn search(&self, request: &SearchRequest) -> Result<SearchResults> {
    let request = request.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let results = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            // Translate the structured query into: FTS match expression,
            // (unused here) LIKE terms, WHERE fragments, JOIN fragments,
            // and the bind values for those fragments, in order.
            let (fts_expr, _like_terms, where_clauses, join_clauses, bind_params) =
                search_query_to_fts(&request.query);
            let use_fts = !fts_expr.is_empty();
            let order_by = sort_order_to_sql(request.sort);
            // Build the base query.
            let mut sql = String::from(
                "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \
                 m.file_size, m.title, m.artist, m.album, m.genre, m.year, \
                 m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \
                 m.date_taken, m.latitude, m.longitude, m.camera_make, \
                 m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \
                 m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \
                 m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items \
                 m ",
            );
            if use_fts {
                sql.push_str("JOIN media_fts ON media_fts.rowid = m.rowid ");
            }
            for j in &join_clauses {
                sql.push_str(j);
                sql.push(' ');
            }
            // Collect all bind parameters: first the filter params, then FTS
            // match (if any), then LIMIT and OFFSET.
            let mut all_params: Vec<String> = bind_params.clone();
            let mut conditions = where_clauses;
            if use_fts {
                conditions.push("media_fts MATCH ?".to_string());
                all_params.push(fts_expr.clone());
            }
            if !conditions.is_empty() {
                sql.push_str("WHERE ");
                sql.push_str(&conditions.join(" AND "));
                sql.push(' ');
            }
            sql.push_str("ORDER BY ");
            sql.push_str(order_by);
            sql.push_str(" LIMIT ? OFFSET ?");
            all_params.push(request.pagination.limit.to_string());
            all_params.push(request.pagination.offset.to_string());
            let mut stmt = db.prepare(&sql)?;
            // Bind everything as text; SQLite coerces LIMIT/OFFSET strings
            // to integers.
            let param_refs: Vec<&dyn rusqlite::types::ToSql> = all_params
                .iter()
                .map(|s| s as &dyn rusqlite::types::ToSql)
                .collect();
            let mut items = stmt
                .query_map(param_refs.as_slice(), row_to_media_item)?
                .collect::<rusqlite::Result<Vec<_>>>()?;
            drop(stmt);
            load_custom_fields_batch(&db, &mut items)?;
            // Count query (same filters, no LIMIT/OFFSET)
            let mut count_sql = String::from("SELECT COUNT(*) FROM media_items m ");
            if use_fts {
                count_sql.push_str("JOIN media_fts ON media_fts.rowid = m.rowid ");
            }
            for j in &join_clauses {
                count_sql.push_str(j);
                count_sql.push(' ');
            }
            if !conditions.is_empty() {
                count_sql.push_str("WHERE ");
                count_sql.push_str(&conditions.join(" AND "));
            }
            // Count query uses the same filter params (+ FTS match) but no
            // LIMIT/OFFSET
            let mut count_params: Vec<String> = bind_params;
            if use_fts {
                count_params.push(fts_expr);
            }
            let count_param_refs: Vec<&dyn rusqlite::types::ToSql> = count_params
                .iter()
                .map(|s| s as &dyn rusqlite::types::ToSql)
                .collect();
            let total_count: i64 =
                db.query_row(&count_sql, count_param_refs.as_slice(), |row| {
                    row.get(0)
                })?;
            drop(db);
            SearchResults {
                items,
                total_count: total_count.cast_unsigned(),
            }
        };
        Ok(results)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Append one entry to the audit log. The entry's own id and timestamp
/// are persisted as-is (nothing is generated here).
async fn record_audit(&self, entry: &AuditEntry) -> Result<()> {
    let entry = entry.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute(
            "INSERT INTO audit_log (id, media_id, action, details, timestamp) \
             VALUES (?1, ?2, ?3, ?4, ?5)",
            params![
                entry.id.to_string(),
                entry.media_id.map(|mid| mid.0.to_string()),
                entry.action.to_string(),
                entry.details,
                entry.timestamp.to_rfc3339(),
            ],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Page through audit entries, newest first, optionally filtered to a
/// single media item.
///
/// Two SQL variants are built because the optional `media_id` filter
/// shifts the positional indices of the LIMIT/OFFSET placeholders; the
/// binding branch below must match the chosen SQL.
async fn list_audit_entries(
    &self,
    media_id: Option<MediaId>,
    pagination: &Pagination,
) -> Result<Vec<AuditEntry>> {
    let pagination = pagination.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let rows = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            // Pick SQL + optional bound media id together so placeholder
            // numbering (?1.. ) stays consistent with the params below.
            let (sql, bind_media_id) = media_id.map_or_else(
                || {
                    (
                        "SELECT id, media_id, action, details, timestamp FROM audit_log \
                         ORDER BY timestamp DESC LIMIT ?1 OFFSET ?2"
                            .to_string(),
                        None,
                    )
                },
                |mid| {
                    (
                        "SELECT id, media_id, action, details, timestamp FROM audit_log \
                         WHERE media_id = ?1 ORDER BY timestamp DESC LIMIT ?2 OFFSET ?3"
                            .to_string(),
                        Some(mid.0.to_string()),
                    )
                },
            );
            let mut stmt = db.prepare(&sql)?;
            // `cast_signed` converts the unsigned pagination values to the
            // i64 type SQLite expects for LIMIT/OFFSET binds.
            let rows = if let Some(ref mid_str) = bind_media_id {
                stmt
                    .query_map(
                        params![
                            mid_str,
                            pagination.limit.cast_signed(),
                            pagination.offset.cast_signed()
                        ],
                        row_to_audit_entry,
                    )?
                    .collect::<rusqlite::Result<Vec<_>>>()?
            } else {
                stmt
                    .query_map(
                        params![
                            pagination.limit.cast_signed(),
                            pagination.offset.cast_signed()
                        ],
                        row_to_audit_entry,
                    )?
                    .collect::<rusqlite::Result<Vec<_>>>()?
            };
            drop(stmt);
            drop(db);
            rows
        };
        Ok(rows)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Create or overwrite a single custom field on a media item
/// (`INSERT OR REPLACE` keyed on `media_id` + `field_name`).
async fn set_custom_field(
    &self,
    media_id: MediaId,
    name: &str,
    field: &CustomField,
) -> Result<()> {
    let name = name.to_string();
    let field = field.clone();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute(
            "INSERT OR REPLACE INTO custom_fields (media_id, field_name, \
             field_type, field_value) VALUES (?1, ?2, ?3, ?4)",
            params![
                media_id.0.to_string(),
                name,
                custom_field_type_to_str(field.field_type),
                field.value,
            ],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
async fn get_custom_fields(
&self,
media_id: MediaId,
) -> Result<FxHashMap<String, CustomField>> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let map = {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let mut stmt = db.prepare(
"SELECT field_name, field_type, field_value FROM custom_fields \
WHERE media_id = ?1",
)?;
let rows = stmt.query_map(params![media_id.0.to_string()], |row| {
let name: String = row.get(0)?;
let ft_str: String = row.get(1)?;
let value: String = row.get(2)?;
Ok((name, CustomField {
field_type: str_to_custom_field_type(&ft_str),
value,
}))
})?;
let mut map = FxHashMap::default();
for r in rows {
let (name, field) = r?;
map.insert(name, field);
}
drop(stmt);
drop(db);
map
};
Ok(map)
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Delete a single custom field from a media item; deleting a field that
/// does not exist is a silent no-op.
async fn delete_custom_field(
    &self,
    media_id: MediaId,
    name: &str,
) -> Result<()> {
    let name = name.to_string();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute(
            "DELETE FROM custom_fields WHERE media_id = ?1 AND field_name = ?2",
            params![media_id.0.to_string(), name],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Delete many media rows in one transaction; returns the number of rows
/// actually removed (IDs that did not exist are silently skipped).
///
/// IDs are deleted in chunks of 500 — presumably to stay under SQLite's
/// bound-parameter limit; TODO confirm against the configured
/// SQLITE_MAX_VARIABLE_NUMBER.
async fn batch_delete_media(&self, ids: &[MediaId]) -> Result<u64> {
    if ids.is_empty() {
        return Ok(0);
    }
    let n = ids.len();
    let ids: Vec<String> = ids.iter().map(|id| id.0.to_string()).collect();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        const CHUNK_SIZE: usize = 500;
        let count = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let ctx = format!("{n} items");
            // Single transaction so either every chunk commits or none do.
            let tx = db
                .unchecked_transaction()
                .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?;
            let mut count = 0u64;
            for chunk in ids.chunks(CHUNK_SIZE) {
                // Build "?1, ?2, …" placeholders sized to this chunk.
                let placeholders: Vec<String> =
                    (1..=chunk.len()).map(|i| format!("?{i}")).collect();
                let sql = format!(
                    "DELETE FROM media_items WHERE id IN ({})",
                    placeholders.join(", ")
                );
                let params: Vec<&dyn rusqlite::ToSql> =
                    chunk.iter().map(|s| s as &dyn rusqlite::ToSql).collect();
                let rows = tx
                    .execute(&sql, params.as_slice())
                    .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?;
                count += rows as u64;
            }
            tx.commit()
                .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?;
            count
        };
        Ok(count)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Apply every tag in `tag_ids` to every media item in `media_ids`
/// (full cross product) inside one transaction.
///
/// Returns the number of *new* associations created; pairs that already
/// existed contribute 0 because of `INSERT OR IGNORE`.
async fn batch_tag_media(
    &self,
    media_ids: &[MediaId],
    tag_ids: &[Uuid],
) -> Result<u64> {
    if media_ids.is_empty() || tag_ids.is_empty() {
        return Ok(0);
    }
    let media_ids: Vec<String> =
        media_ids.iter().map(|id| id.0.to_string()).collect();
    let tag_ids: Vec<String> = tag_ids
        .iter()
        .map(std::string::ToString::to_string)
        .collect();
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let count = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let ctx = format!("{} media x {} tags", media_ids.len(), tag_ids.len());
            let tx = db
                .unchecked_transaction()
                .map_err(crate::error::db_ctx("batch_tag_media", &ctx))?;
            // Prepare statement once for reuse
            let mut stmt = tx
                .prepare_cached(
                    "INSERT OR IGNORE INTO media_tags (media_id, tag_id) VALUES (?1, \
                     ?2)",
                )
                .map_err(crate::error::db_ctx("batch_tag_media", &ctx))?;
            let mut count = 0u64;
            for mid in &media_ids {
                for tid in &tag_ids {
                    let rows = stmt
                        .execute(params![mid, tid])
                        .map_err(crate::error::db_ctx("batch_tag_media", &ctx))?;
                    count += rows as u64; // INSERT OR IGNORE: rows=1 if new, 0 if existed
                }
            }
            // The cached statement borrows `tx`; drop it before committing.
            drop(stmt);
            tx.commit()
                .map_err(crate::error::db_ctx("batch_tag_media", &ctx))?;
            count
        };
        Ok(count)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Apply the same metadata update to many media items in one transaction.
///
/// Only the fields passed as `Some` are written; `updated_at` is always
/// refreshed. Returns the number of rows updated, or 0 without touching
/// the database when no updatable field was provided.
///
/// Placeholder numbering is shared between the SET clause (?1..?idx-1)
/// and the chunked `IN (...)` list (?idx..), so the SET params must be
/// bound before the id params for every chunk.
async fn batch_update_media(
    &self,
    ids: &[MediaId],
    title: Option<&str>,
    artist: Option<&str>,
    album: Option<&str>,
    genre: Option<&str>,
    year: Option<i32>,
    description: Option<&str>,
) -> Result<u64> {
    if ids.is_empty() {
        return Ok(0);
    }
    let ids: Vec<String> = ids.iter().map(|id| id.0.to_string()).collect();
    let title = title.map(String::from);
    let artist = artist.map(String::from);
    let album = album.map(String::from);
    let genre = genre.map(String::from);
    let description = description.map(String::from);
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        const CHUNK_SIZE: usize = 500;
        // Build SET clause dynamically from provided fields; `idx` tracks
        // the next free positional placeholder number.
        let mut set_parts = Vec::new();
        let mut params_vec: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
        let mut idx = 1;
        if let Some(ref v) = title {
            set_parts.push(format!("title = ?{idx}"));
            params_vec.push(Box::new(v.clone()));
            idx += 1;
        }
        if let Some(ref v) = artist {
            set_parts.push(format!("artist = ?{idx}"));
            params_vec.push(Box::new(v.clone()));
            idx += 1;
        }
        if let Some(ref v) = album {
            set_parts.push(format!("album = ?{idx}"));
            params_vec.push(Box::new(v.clone()));
            idx += 1;
        }
        if let Some(ref v) = genre {
            set_parts.push(format!("genre = ?{idx}"));
            params_vec.push(Box::new(v.clone()));
            idx += 1;
        }
        if let Some(v) = year {
            set_parts.push(format!("year = ?{idx}"));
            params_vec.push(Box::new(v));
            idx += 1;
        }
        if let Some(ref v) = description {
            set_parts.push(format!("description = ?{idx}"));
            params_vec.push(Box::new(v.clone()));
            idx += 1;
        }
        // Always update updated_at
        let now = chrono::Utc::now().to_rfc3339();
        set_parts.push(format!("updated_at = ?{idx}"));
        params_vec.push(Box::new(now));
        idx += 1;
        if set_parts.len() == 1 {
            // Only updated_at, nothing to change
            return Ok(0);
        }
        let count = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            let ctx = format!("{} items", ids.len());
            let tx = db
                .unchecked_transaction()
                .map_err(crate::error::db_ctx("batch_update_media", &ctx))?;
            let mut count = 0u64;
            for chunk in ids.chunks(CHUNK_SIZE) {
                // Id placeholders continue numbering after the SET params.
                let id_placeholders: Vec<String> =
                    (0..chunk.len()).map(|i| format!("?{}", idx + i)).collect();
                let sql = format!(
                    "UPDATE media_items SET {} WHERE id IN ({})",
                    set_parts.join(", "),
                    id_placeholders.join(", ")
                );
                // Bind order: SET values first, then this chunk's ids.
                let mut all_params: Vec<&dyn rusqlite::ToSql> =
                    params_vec.iter().map(std::convert::AsRef::as_ref).collect();
                let id_params: Vec<&dyn rusqlite::ToSql> =
                    chunk.iter().map(|s| s as &dyn rusqlite::ToSql).collect();
                all_params.extend(id_params);
                let rows = tx
                    .execute(&sql, all_params.as_slice())
                    .map_err(crate::error::db_ctx("batch_update_media", &ctx))?;
                count += rows as u64;
            }
            tx.commit()
                .map_err(crate::error::db_ctx("batch_update_media", &ctx))?;
            count
        };
        Ok(count)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Group non-deleted media items that share an exact content hash.
///
/// Returns one inner `Vec` per hash that occurs more than once, each
/// ordered by `created_at`.
async fn find_duplicates(&self) -> Result<Vec<Vec<MediaItem>>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let groups = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            // Use the explicit column list (the same one every other media
            // query pairs with `row_to_media_item`) instead of `SELECT *`,
            // so a schema migration that reorders or appends columns cannot
            // silently shift values into the wrong struct fields.
            let mut stmt = db.prepare(
                "SELECT id, path, file_name, media_type, content_hash, \
                 file_size, title, artist, album, genre, year, \
                 duration_secs, description, thumbnail_path, file_mtime, \
                 date_taken, latitude, longitude, camera_make, \
                 camera_model, rating, perceptual_hash, storage_mode, \
                 original_filename, uploaded_at, storage_key, created_at, \
                 updated_at, deleted_at, links_extracted_at FROM media_items \
                 WHERE deleted_at IS NULL AND content_hash IN ( \
                 SELECT content_hash FROM media_items WHERE deleted_at IS NULL \
                 GROUP BY content_hash HAVING COUNT(*) > 1 \
                 ) ORDER BY content_hash, created_at",
            )?;
            let mut rows: Vec<MediaItem> = stmt
                .query_map([], row_to_media_item)?
                .collect::<rusqlite::Result<Vec<_>>>()?;
            drop(stmt);
            load_custom_fields_batch(&db, &mut rows)?;
            // Rows are ordered by content_hash, so duplicates arrive as
            // contiguous runs; start a new group whenever the hash changes.
            let mut groups: Vec<Vec<MediaItem>> = Vec::new();
            let mut current_hash = String::new();
            for item in rows {
                if item.content_hash.0 != current_hash {
                    current_hash.clone_from(&item.content_hash.0);
                    groups.push(Vec::new());
                }
                if let Some(group) = groups.last_mut() {
                    group.push(item);
                }
            }
            groups
        };
        Ok(groups)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))?
}
/// Group media items whose perceptual hashes are within `threshold`
/// Hamming distance of each other.
///
/// Greedy pairwise grouping: each ungrouped item seeds a group and
/// absorbs all later ungrouped items within the threshold. Items whose
/// stored hash fails to decode are skipped.
///
/// NOTE(review): unlike `find_duplicates`, soft-deleted rows are not
/// excluded here — confirm whether that is intentional.
async fn find_perceptual_duplicates(
    &self,
    threshold: u32,
) -> Result<Vec<Vec<MediaItem>>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        use image_hasher::ImageHash;
        let items = {
            let db = conn
                .lock()
                .map_err(|e| PinakesError::Database(e.to_string()))?;
            // Explicit column list (matching `row_to_media_item`, as in the
            // other media queries) instead of `SELECT *`, so column
            // reordering in the schema cannot misassign fields.
            let mut stmt = db.prepare(
                "SELECT id, path, file_name, media_type, content_hash, \
                 file_size, title, artist, album, genre, year, \
                 duration_secs, description, thumbnail_path, file_mtime, \
                 date_taken, latitude, longitude, camera_make, \
                 camera_model, rating, perceptual_hash, storage_mode, \
                 original_filename, uploaded_at, storage_key, created_at, \
                 updated_at, deleted_at, links_extracted_at FROM media_items \
                 WHERE perceptual_hash IS NOT NULL ORDER BY id",
            )?;
            let mut items: Vec<MediaItem> = stmt
                .query_map([], row_to_media_item)?
                .collect::<rusqlite::Result<Vec<_>>>()?;
            drop(stmt);
            load_custom_fields_batch(&db, &mut items)?;
            items
        };
        // Decode each base64 hash exactly once. The previous version
        // re-decoded hash_b inside the O(n²) pair loop, i.e. O(n²) decodes.
        let decoded: Vec<Option<ImageHash<Vec<u8>>>> = items
            .iter()
            .map(|item| {
                item.perceptual_hash
                    .as_deref()
                    .and_then(|h| ImageHash::<Vec<u8>>::from_base64(h).ok())
            })
            .collect();
        let mut groups: Vec<Vec<MediaItem>> = Vec::new();
        let mut grouped_indices: rustc_hash::FxHashSet<usize> =
            rustc_hash::FxHashSet::default();
        for i in 0..items.len() {
            if grouped_indices.contains(&i) {
                continue;
            }
            let Some(hash_a) = decoded[i].as_ref() else {
                continue;
            };
            let mut group = vec![items[i].clone()];
            grouped_indices.insert(i);
            for j in (i + 1)..items.len() {
                if grouped_indices.contains(&j) {
                    continue;
                }
                let Some(hash_b) = decoded[j].as_ref() else {
                    continue;
                };
                if hash_a.dist(hash_b) <= threshold {
                    group.push(items[j].clone());
                    grouped_indices.insert(j);
                }
            }
            // Only keep groups with more than one item (actual duplicates).
            if group.len() > 1 {
                groups.push(group);
            }
        }
        Ok(groups)
    })
    .await
    .map_err(|e| {
        PinakesError::Database(format!(
            "find_perceptual_duplicates (threshold={threshold}): {e}"
        ))
    })?
}
async fn database_stats(&self) -> Result<crate::storage::DatabaseStats> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let stats = {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let media_count: i64 =
db.query_row("SELECT COUNT(*) FROM media_items", [], |row| {
row.get(0)
})?;
let tag_count: i64 =
db.query_row("SELECT COUNT(*) FROM tags", [], |row| row.get(0))?;
let collection_count: i64 =
db.query_row("SELECT COUNT(*) FROM collections", [], |row| {
row.get(0)
})?;
let audit_count: i64 =
db.query_row("SELECT COUNT(*) FROM audit_log", [], |row| row.get(0))?;
let page_count: i64 =
db.query_row("PRAGMA page_count", [], |row| row.get(0))?;
let page_size: i64 =
db.query_row("PRAGMA page_size", [], |row| row.get(0))?;
let database_size_bytes = (page_count * page_size).cast_unsigned();
crate::storage::DatabaseStats {
media_count: media_count.cast_unsigned(),
tag_count: tag_count.cast_unsigned(),
collection_count: collection_count.cast_unsigned(),
audit_count: audit_count.cast_unsigned(),
database_size_bytes,
backend_name: "sqlite".to_string(),
}
};
Ok(stats)
})
.await
.map_err(|e| PinakesError::Database(format!("database_stats: {e}")))?
}
/// Run `VACUUM` to rebuild the database file and reclaim free pages.
async fn vacuum(&self) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute_batch("VACUUM")?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(format!("vacuum: {e}")))?
}
/// Wipe every user-data table. Child tables (audit log, custom fields,
/// memberships, tag links) are cleared before their parents.
async fn clear_all_data(&self) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute_batch(
            "DELETE FROM audit_log;
             DELETE FROM custom_fields;
             DELETE FROM collection_members;
             DELETE FROM media_tags;
             DELETE FROM media_items;
             DELETE FROM tags;
             DELETE FROM collections;",
        )?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(format!("clear_all_data: {e}")))?
}
async fn list_media_paths(
&self,
) -> Result<Vec<(MediaId, std::path::PathBuf, ContentHash)>> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let results = {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let mut stmt = db.prepare(
"SELECT id, path, content_hash FROM media_items WHERE deleted_at IS \
NULL",
)?;
let rows = stmt.query_map([], |row| {
let id_str: String = row.get(0)?;
let path_str: String = row.get(1)?;
let hash_str: String = row.get(2)?;
let id = parse_uuid(&id_str)?;
Ok((
MediaId(id),
PathBuf::from(path_str),
ContentHash::new(hash_str),
))
})?;
let mut results = Vec::new();
for row in rows {
results.push(row?);
}
results
};
Ok(results)
})
.await
.map_err(|e| PinakesError::Database(format!("list_media_paths: {e}")))?
}
/// Persist (or overwrite, via `INSERT OR REPLACE`) a saved search under
/// the given id.
async fn save_search(
    &self,
    id: Uuid,
    name: &str,
    query: &str,
    sort_order: Option<&str>,
) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.to_string();
    let name = name.to_owned();
    let query = query.to_owned();
    let sort_order = sort_order.map(|s| s.to_owned());
    let created_at = chrono::Utc::now().to_rfc3339();
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute(
            "INSERT OR REPLACE INTO saved_searches (id, name, query, \
             sort_order, created_at) VALUES (?1, ?2, ?3, ?4, ?5)",
            params![id_str, name, query, sort_order, created_at],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| PinakesError::Database(format!("save_search {id}: {e}")))?
}
async fn list_saved_searches(
&self,
) -> Result<Vec<crate::model::SavedSearch>> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let results = {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let mut stmt = db.prepare(
"SELECT id, name, query, sort_order, created_at FROM saved_searches \
ORDER BY created_at DESC",
)?;
let rows = stmt.query_map([], |row| {
let id_str: String = row.get(0)?;
let name: String = row.get(1)?;
let query: String = row.get(2)?;
let sort_order: Option<String> = row.get(3)?;
let created_at_str: String = row.get(4)?;
let id = parse_uuid(&id_str)?;
Ok(crate::model::SavedSearch {
id,
name,
query,
sort_order,
created_at: parse_datetime(&created_at_str),
})
})?;
let mut results = Vec::new();
for row in rows {
results.push(row?);
}
results
};
Ok(results)
})
.await
.map_err(|e| PinakesError::Database(format!("list_saved_searches: {e}")))?
}
async fn get_saved_search(
&self,
id: Uuid,
) -> Result<crate::model::SavedSearch> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
tokio::task::spawn_blocking(move || {
let result = {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
db.query_row(
"SELECT id, name, query, sort_order, created_at FROM saved_searches \
WHERE id = ?1",
params![id_str],
|row| {
let rid: String = row.get(0)?;
let name: String = row.get(1)?;
let query: String = row.get(2)?;
let sort_order: Option<String> = row.get(3)?;
let created_at_str: String = row.get(4)?;
let uid = parse_uuid(&rid)?;
Ok(crate::model::SavedSearch {
id: uid,
name,
query,
sort_order,
created_at: parse_datetime(&created_at_str),
})
},
)
.map_err(|e| {
match e {
rusqlite::Error::QueryReturnedNoRows => {
PinakesError::NotFound(format!("saved search {id}"))
},
other => PinakesError::Database(other.to_string()),
}
})?
};
Ok(result)
})
.await
.map_err(|e| PinakesError::Database(format!("get_saved_search: {e}")))?
}
/// Delete a saved search by id; deleting a nonexistent one is a no-op.
async fn delete_saved_search(&self, id: Uuid) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.to_string();
    tokio::task::spawn_blocking(move || {
        let guard = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        guard.execute(
            "DELETE FROM saved_searches WHERE id = ?1",
            params![id_str],
        )?;
        Ok(())
    })
    .await
    .map_err(|e| {
        PinakesError::Database(format!("delete_saved_search {id}: {e}"))
    })?
}
async fn list_media_ids_for_thumbnails(
&self,
only_missing: bool,
) -> Result<Vec<MediaId>> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let sql = if only_missing {
"SELECT id FROM media_items WHERE thumbnail_path IS NULL ORDER BY \
created_at DESC"
} else {
"SELECT id FROM media_items ORDER BY created_at DESC"
};
let mut stmt = db.prepare(sql)?;
let ids: Vec<MediaId> = stmt
.query_map([], |r| {
let s: String = r.get(0)?;
Ok(MediaId(uuid::Uuid::parse_str(&s).unwrap_or_default()))
})?
.filter_map(std::result::Result::ok)
.collect();
Ok(ids)
})
.await
.map_err(|e| {
PinakesError::Database(format!(
"list_media_ids_for_thumbnails (only_missing={only_missing}): {e}"
))
})?
}
/// Compute the aggregate library statistics shown on the dashboard:
/// totals, per-type breakdowns, newest/oldest timestamps, top tags and
/// collections, and the duplicate-hash count.
///
/// The whole batch of queries runs under a 30-second timeout so a huge
/// library cannot wedge the caller.
///
/// NOTE(review): the `filter_map(Result::ok)` collectors silently drop
/// rows that fail to decode — confirm that is acceptable for statistics.
async fn library_statistics(&self) -> Result<super::LibraryStatistics> {
    let conn = Arc::clone(&self.conn);
    let fut = tokio::task::spawn_blocking(move || {
        let db = conn
            .lock()
            .map_err(|e| PinakesError::Database(e.to_string()))?;
        let total_media: u64 =
            db.query_row("SELECT COUNT(*) FROM media_items", [], |r| r.get(0))?;
        let total_size: u64 = db.query_row(
            "SELECT COALESCE(SUM(file_size), 0) FROM media_items",
            [],
            |r| r.get(0),
        )?;
        // checked_div avoids a divide-by-zero on an empty library.
        let avg_size: u64 = total_size.checked_div(total_media).unwrap_or(0);
        // Media count by type
        let mut stmt = db.prepare(
            "SELECT media_type, COUNT(*) FROM media_items GROUP BY media_type \
             ORDER BY COUNT(*) DESC",
        )?;
        let media_by_type: Vec<(String, u64)> = stmt
            .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))?
            .filter_map(std::result::Result::ok)
            .collect();
        // Storage by type
        let mut stmt = db.prepare(
            "SELECT media_type, COALESCE(SUM(file_size), 0) FROM media_items \
             GROUP BY media_type ORDER BY SUM(file_size) DESC",
        )?;
        let storage_by_type: Vec<(String, u64)> = stmt
            .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))?
            .filter_map(std::result::Result::ok)
            .collect();
        // Newest / oldest (None when the library is empty)
        let newest: Option<String> = db
            .query_row(
                "SELECT created_at FROM media_items ORDER BY created_at DESC LIMIT 1",
                [],
                |r| r.get(0),
            )
            .optional()?;
        let oldest: Option<String> = db
            .query_row(
                "SELECT created_at FROM media_items ORDER BY created_at ASC LIMIT 1",
                [],
                |r| r.get(0),
            )
            .optional()?;
        // Top tags
        let mut stmt = db.prepare(
            "SELECT t.name, COUNT(*) as cnt FROM media_tags mt JOIN tags t ON \
             mt.tag_id = t.id GROUP BY t.id ORDER BY cnt DESC LIMIT 10",
        )?;
        let top_tags: Vec<(String, u64)> = stmt
            .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))?
            .filter_map(std::result::Result::ok)
            .collect();
        // Top collections
        let mut stmt = db.prepare(
            "SELECT c.name, COUNT(*) as cnt FROM collection_members cm JOIN \
             collections c ON cm.collection_id = c.id GROUP BY c.id ORDER BY cnt \
             DESC LIMIT 10",
        )?;
        let top_collections: Vec<(String, u64)> = stmt
            .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))?
            .filter_map(std::result::Result::ok)
            .collect();
        let total_tags: u64 =
            db.query_row("SELECT COUNT(*) FROM tags", [], |r| r.get(0))?;
        let total_collections: u64 =
            db.query_row("SELECT COUNT(*) FROM collections", [], |r| r.get(0))?;
        // Duplicates: count of hashes that appear more than once
        let total_duplicates: u64 = db.query_row(
            "SELECT COUNT(*) FROM (SELECT content_hash FROM media_items GROUP BY \
             content_hash HAVING COUNT(*) > 1)",
            [],
            |r| r.get(0),
        )?;
        Ok(super::LibraryStatistics {
            total_media,
            total_size_bytes: total_size,
            avg_file_size_bytes: avg_size,
            media_by_type,
            storage_by_type,
            newest_item: newest,
            oldest_item: oldest,
            top_tags,
            top_collections,
            total_tags,
            total_collections,
            total_duplicates,
        })
    });
    tokio::time::timeout(std::time::Duration::from_secs(30), fut)
        .await
        .map_err(|_| {
            PinakesError::Database("library_statistics query timed out".to_string())
        })?
        .map_err(|e| PinakesError::Database(format!("library_statistics: {e}")))?
}
  /// Return every user account, most recently created first.
  ///
  /// Each row is joined with its profile (stored in a separate table) via
  /// `load_user_profile_sync`. Decoding is defensive: an unparseable role
  /// falls back to `Viewer` and an unparseable timestamp falls back to
  /// "now", so one corrupt row does not fail the whole listing.
  ///
  /// # Errors
  ///
  /// Returns `PinakesError::Database` on lock poisoning, SQL failure, a
  /// 10-second timeout, or if the blocking task panics.
  async fn list_users(&self) -> Result<Vec<crate::users::User>> {
    let conn = Arc::clone(&self.conn);
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      let mut stmt = db.prepare(
        "SELECT id, username, password_hash, role, created_at, updated_at \
         FROM users ORDER BY created_at DESC",
      )?;
      let users = stmt
        .query_map([], |row| {
          let id_str: String = row.get(0)?;
          // One extra query per row; acceptable for the expected number of
          // user accounts.
          let profile = load_user_profile_sync(&db, &id_str)?;
          Ok(crate::users::User {
            id: crate::users::UserId(parse_uuid(&id_str)?),
            username: row.get(1)?,
            password_hash: row.get(2)?,
            // Role is stored as JSON; unknown/corrupt values degrade to
            // the least-privileged role rather than erroring out.
            role: serde_json::from_str(&row.get::<_, String>(3)?)
              .unwrap_or(crate::config::UserRole::Viewer),
            profile,
            created_at: chrono::DateTime::parse_from_rfc3339(
              &row.get::<_, String>(4)?,
            )
            .unwrap_or_else(|_| chrono::Utc::now().into())
            .with_timezone(&chrono::Utc),
            updated_at: chrono::DateTime::parse_from_rfc3339(
              &row.get::<_, String>(5)?,
            )
            .unwrap_or_else(|_| chrono::Utc::now().into())
            .with_timezone(&chrono::Utc),
          })
        })?
        .collect::<std::result::Result<Vec<_>, _>>()?;
      Ok(users)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("list_users query timed out".to_string())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("list_users: {e}"))
      })?
  }
  /// Fetch a single user by id, including their profile.
  ///
  /// Uses the same defensive decoding as `list_users`: corrupt role JSON
  /// degrades to `Viewer` and corrupt timestamps degrade to "now".
  ///
  /// # Errors
  ///
  /// Returns `PinakesError::NotFound` when no row matches the id, or
  /// `PinakesError::Database` on lock/SQL/timeout/join failures.
  async fn get_user(
    &self,
    id: crate::users::UserId,
  ) -> Result<crate::users::User> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.0.to_string();
    // A second copy is needed because `id_str` moves into the closure.
    let id_str_for_err = id_str.clone();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      // `.optional()` turns "no rows" into None so we can map it to a
      // NotFound error below instead of a generic database error.
      let opt = db
        .query_row(
          "SELECT id, username, password_hash, role, created_at, updated_at \
           FROM users WHERE id = ?",
          [&id_str],
          |row| {
            let id_str: String = row.get(0)?;
            let profile = load_user_profile_sync(&db, &id_str)?;
            Ok(crate::users::User {
              id: crate::users::UserId(parse_uuid(&id_str)?),
              username: row.get(1)?,
              password_hash: row.get(2)?,
              role: serde_json::from_str(&row.get::<_, String>(3)?)
                .unwrap_or(crate::config::UserRole::Viewer),
              profile,
              created_at: chrono::DateTime::parse_from_rfc3339(
                &row.get::<_, String>(4)?,
              )
              .unwrap_or_else(|_| chrono::Utc::now().into())
              .with_timezone(&chrono::Utc),
              updated_at: chrono::DateTime::parse_from_rfc3339(
                &row.get::<_, String>(5)?,
              )
              .unwrap_or_else(|_| chrono::Utc::now().into())
              .with_timezone(&chrono::Utc),
            })
          },
        )
        .optional()?;
      opt.ok_or_else(|| PinakesError::NotFound(format!("user {id_str}")))
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database(format!(
          "get_user query timed out for {id_str_for_err}"
        ))
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_user {id_str_for_err}: {e}"))
      })?
  }
async fn get_user_by_username(
&self,
username: &str,
) -> Result<crate::users::User> {
let conn = Arc::clone(&self.conn);
let username = username.to_string();
let username_for_err = username.clone();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let opt = db
.query_row(
"SELECT id, username, password_hash, role, created_at, updated_at \
FROM users WHERE username = ?",
[&username],
|row| {
let id_str: String = row.get(0)?;
let profile = load_user_profile_sync(&db, &id_str)?;
Ok(crate::users::User {
id: crate::users::UserId(parse_uuid(&id_str)?),
username: row.get(1)?,
password_hash: row.get(2)?,
role: serde_json::from_str(&row.get::<_, String>(3)?)
.unwrap_or(crate::config::UserRole::Viewer),
profile,
created_at: chrono::DateTime::parse_from_rfc3339(
&row.get::<_, String>(4)?,
)
.unwrap_or_else(|_| chrono::Utc::now().into())
.with_timezone(&chrono::Utc),
updated_at: chrono::DateTime::parse_from_rfc3339(
&row.get::<_, String>(5)?,
)
.unwrap_or_else(|_| chrono::Utc::now().into())
.with_timezone(&chrono::Utc),
})
},
)
.optional()?;
opt.ok_or_else(|| {
PinakesError::NotFound(format!("user with username {username}"))
})
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database(format!(
"get_user_by_username query timed out for {username_for_err}"
))
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!(
"get_user_by_username {username_for_err}: {e}"
))
})?
}
  /// Create a new user account, optionally with an initial profile.
  ///
  /// The user row and (optional) profile row are inserted inside a single
  /// transaction so a profile can never exist without its user. Ids are
  /// time-ordered UUIDv7 values.
  ///
  /// # Errors
  ///
  /// Returns `PinakesError::Serialization` if the role or preferences
  /// cannot be serialized, `PinakesError::Database` on lock/SQL/timeout
  /// failures. NOTE(review): a duplicate username presumably surfaces as a
  /// SQL constraint error mapped to `Database` — confirm against schema.
  async fn create_user(
    &self,
    username: &str,
    password_hash: &str,
    role: crate::config::UserRole,
    profile: Option<crate::users::UserProfile>,
  ) -> Result<crate::users::User> {
    let conn = Arc::clone(&self.conn);
    let username = username.to_string();
    let password_hash = password_hash.to_string();
    let fut =
      tokio::task::spawn_blocking(move || -> Result<crate::users::User> {
        let db = conn.lock().map_err(|e| {
          PinakesError::Database(format!(
            "failed to acquire database lock: {e}"
          ))
        })?;
        let tx = db.unchecked_transaction()?;
        let id = crate::users::UserId(uuid::Uuid::now_v7());
        let id_str = id.0.to_string();
        let now = chrono::Utc::now();
        // Role is persisted as JSON to match how readers decode it.
        let role_str = serde_json::to_string(&role).map_err(|e| {
          PinakesError::Serialization(format!("failed to serialize role: {e}"))
        })?;
        tx.execute(
          "INSERT INTO users (id, username, password_hash, role, created_at, \
           updated_at) VALUES (?, ?, ?, ?, ?, ?)",
          rusqlite::params![
            &id_str,
            &username,
            &password_hash,
            &role_str,
            now.to_rfc3339(),
            now.to_rfc3339()
          ],
        )?;
        // Insert the profile row only when one was supplied; otherwise the
        // returned user carries an empty default profile (no DB row).
        let user_profile = if let Some(prof) = profile.clone() {
          let prefs_json =
            serde_json::to_string(&prof.preferences).map_err(|e| {
              PinakesError::Serialization(format!(
                "failed to serialize preferences: {e}"
              ))
            })?;
          tx.execute(
            "INSERT INTO user_profiles (user_id, avatar_path, bio, \
             preferences_json, created_at, updated_at) VALUES (?, ?, ?, ?, ?, \
             ?)",
            rusqlite::params![
              &id_str,
              &prof.avatar_path,
              &prof.bio,
              &prefs_json,
              now.to_rfc3339(),
              now.to_rfc3339()
            ],
          )?;
          prof
        } else {
          crate::users::UserProfile {
            avatar_path: None,
            bio: None,
            preferences: Default::default(),
          }
        };
        tx.commit()?;
        // Echo back the inserted values instead of re-reading the row.
        Ok(crate::users::User {
          id,
          username,
          password_hash,
          role,
          profile: user_profile,
          created_at: now,
          updated_at: now,
        })
      });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("create_user query timed out".to_string())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("create_user: {e}"))
      })?
  }
async fn update_user(
&self,
id: crate::users::UserId,
password_hash: Option<&str>,
role: Option<crate::config::UserRole>,
profile: Option<crate::users::UserProfile>,
) -> Result<crate::users::User> {
let conn = Arc::clone(&self.conn);
let password_hash = password_hash.map(std::string::ToString::to_string);
let id_str = id.0.to_string();
let fut =
tokio::task::spawn_blocking(move || -> Result<crate::users::User> {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!(
"failed to acquire database lock: {e}"
))
})?;
let tx = db.unchecked_transaction()?;
let now = chrono::Utc::now();
// Update password and/or role if provided
if password_hash.is_some() || role.is_some() {
let mut updates = vec!["updated_at = ?"];
let mut params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
params.push(Box::new(now.to_rfc3339()));
if let Some(ref pw) = password_hash {
updates.push("password_hash = ?");
params.push(Box::new(pw.clone()));
}
if let Some(ref r) = role {
updates.push("role = ?");
let role_str = serde_json::to_string(r).map_err(|e| {
PinakesError::Serialization(format!(
"failed to serialize role: {e}"
))
})?;
params.push(Box::new(role_str));
}
params.push(Box::new(id_str.clone()));
let sql =
format!("UPDATE users SET {} WHERE id = ?", updates.join(", "));
let param_refs: Vec<&dyn rusqlite::ToSql> =
params.iter().map(std::convert::AsRef::as_ref).collect();
tx.execute(&sql, param_refs.as_slice())?;
}
// Update profile if provided
if let Some(prof) = profile {
let prefs_json =
serde_json::to_string(&prof.preferences).map_err(|e| {
PinakesError::Serialization(format!(
"failed to serialize preferences: {e}"
))
})?;
tx.execute(
"INSERT OR REPLACE INTO user_profiles (user_id, avatar_path, bio, \
preferences_json, created_at, updated_at) VALUES (?, ?, ?, ?, \
COALESCE((SELECT created_at FROM user_profiles WHERE user_id = \
?), ?), ?)",
rusqlite::params![
&id_str,
&prof.avatar_path,
&prof.bio,
&prefs_json,
&id_str,
now.to_rfc3339(),
now.to_rfc3339()
],
)?;
}
tx.commit()?;
// Fetch updated user
Ok(db.query_row(
"SELECT id, username, password_hash, role, created_at, updated_at \
FROM users WHERE id = ?",
[&id_str],
|row| {
let id_str: String = row.get(0)?;
let profile = load_user_profile_sync(&db, &id_str)?;
Ok(crate::users::User {
id: crate::users::UserId(parse_uuid(&id_str)?),
username: row.get(1)?,
password_hash: row.get(2)?,
role: serde_json::from_str(&row.get::<_, String>(3)?)
.unwrap_or(crate::config::UserRole::Viewer),
profile,
created_at: chrono::DateTime::parse_from_rfc3339(
&row.get::<_, String>(4)?,
)
.unwrap_or_else(|_| chrono::Utc::now().into())
.with_timezone(&chrono::Utc),
updated_at: chrono::DateTime::parse_from_rfc3339(
&row.get::<_, String>(5)?,
)
.unwrap_or_else(|_| chrono::Utc::now().into())
.with_timezone(&chrono::Utc),
})
},
)?)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("update_user query timed out".to_string())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("update_user: {e}"))
})?
}
async fn delete_user(&self, id: crate::users::UserId) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.0.to_string();
let fut = tokio::task::spawn_blocking(move || -> Result<()> {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let tx = db.unchecked_transaction()?;
// Delete profile first due to foreign key
tx.execute("DELETE FROM user_profiles WHERE user_id = ?", [&id_str])?;
// Delete library access
tx.execute("DELETE FROM user_libraries WHERE user_id = ?", [&id_str])?;
// Delete user
let affected = tx.execute("DELETE FROM users WHERE id = ?", [&id_str])?;
if affected == 0 {
return Err(PinakesError::NotFound(format!("user {id_str}")));
}
tx.commit()?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("delete_user query timed out".to_string())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_user: {e}"))
})?
}
async fn get_user_libraries(
&self,
user_id: crate::users::UserId,
) -> Result<Vec<crate::users::UserLibraryAccess>> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let mut stmt = db.prepare(
"SELECT user_id, root_path, permission, granted_at FROM \
user_libraries WHERE user_id = ?",
)?;
let libraries = stmt
.query_map([&user_id_str], |row| {
let id_str: String = row.get(0)?;
Ok(crate::users::UserLibraryAccess {
user_id: crate::users::UserId(parse_uuid(&id_str)?),
root_path: row.get(1)?,
permission: serde_json::from_str(&row.get::<_, String>(2)?)
.unwrap_or(crate::users::LibraryPermission::Read),
granted_at: chrono::DateTime::parse_from_rfc3339(
&row.get::<_, String>(3)?,
)
.unwrap_or_else(|_| chrono::Utc::now().into())
.with_timezone(&chrono::Utc),
})
})?
.filter_map(std::result::Result::ok)
.collect::<Vec<_>>();
Ok(libraries)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("get_user_libraries query timed out".to_string())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(e.to_string())
})?
}
async fn grant_library_access(
&self,
user_id: crate::users::UserId,
root_path: &str,
permission: crate::users::LibraryPermission,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let root_path = root_path.to_string();
let user_id_str = user_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || -> Result<()> {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let perm_str = serde_json::to_string(&permission).map_err(|e| {
PinakesError::Serialization(format!(
"failed to serialize permission: {e}"
))
})?;
let now = chrono::Utc::now();
db.execute(
"INSERT OR REPLACE INTO user_libraries (user_id, root_path, \
permission, granted_at) VALUES (?, ?, ?, ?)",
rusqlite::params![
&user_id_str,
&root_path,
&perm_str,
now.to_rfc3339()
],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database(
"grant_library_access query timed out".to_string(),
)
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("grant_library_access: {e}"))
})?
}
async fn revoke_library_access(
&self,
user_id: crate::users::UserId,
root_path: &str,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let root_path = root_path.to_string();
let user_id_str = user_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"DELETE FROM user_libraries WHERE user_id = ? AND root_path = ?",
rusqlite::params![&user_id_str, &root_path],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database(
"revoke_library_access query timed out".to_string(),
)
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("revoke_library_access: {e}"))
})?
}
  /// Upsert a user's rating (and optional review) for a media item.
  ///
  /// A fresh UUIDv7 id is generated for each call; `INSERT OR REPLACE`
  /// overwrites any existing rating for the (user, media) pair. The stored
  /// row is re-read afterwards so the returned `id`/`created_at` always
  /// reflect what is actually persisted.
  ///
  /// # Errors
  ///
  /// `PinakesError::Database` on lock/SQL/timeout/join failures.
  async fn rate_media(
    &self,
    user_id: crate::users::UserId,
    media_id: MediaId,
    stars: u8,
    review: Option<&str>,
  ) -> Result<crate::social::Rating> {
    let conn = Arc::clone(&self.conn);
    let user_id_str = user_id.0.to_string();
    let media_id_str = media_id.0.to_string();
    let review = review.map(String::from);
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      let id = Uuid::now_v7();
      let id_str = id.to_string();
      let now = chrono::Utc::now();
      db.execute(
        "INSERT OR REPLACE INTO ratings (id, user_id, media_id, stars, \
         review_text, created_at) VALUES (?, ?, ?, ?, ?, ?)",
        params![
          &id_str,
          &user_id_str,
          &media_id_str,
          i32::from(stars),
          &review,
          now.to_rfc3339()
        ],
      )?;
      // Re-select the persisted row as the source of truth for id and
      // created_at. NOTE(review): with SQLite's REPLACE the freshly
      // inserted values should win; this defensive re-read also covers a
      // schema without a UNIQUE(user_id, media_id) constraint -- confirm
      // against the table definition.
      let (actual_id, actual_created_at) = db.query_row(
        "SELECT id, created_at FROM ratings WHERE user_id = ? AND media_id = ?",
        params![&user_id_str, &media_id_str],
        |row| {
          let rid_str: String = row.get(0)?;
          let created_str: String = row.get(1)?;
          Ok((parse_uuid(&rid_str)?, parse_datetime(&created_str)))
        },
      )?;
      Ok(crate::social::Rating {
        id: actual_id,
        user_id,
        media_id,
        stars,
        review_text: review,
        created_at: actual_created_at,
      })
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("rate_media timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("rate_media: {e}"))
      })?
  }
async fn get_media_ratings(
&self,
media_id: MediaId,
) -> Result<Vec<crate::social::Rating>> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let mut stmt = db.prepare(
"SELECT id, user_id, media_id, stars, review_text, created_at FROM \
ratings WHERE media_id = ? ORDER BY created_at DESC",
)?;
let ratings = stmt
.query_map([&media_id_str], |row| {
let id_str: String = row.get(0)?;
let uid_str: String = row.get(1)?;
let mid_str: String = row.get(2)?;
let created_str: String = row.get(5)?;
Ok(crate::social::Rating {
id: parse_uuid(&id_str)?,
user_id: crate::users::UserId(parse_uuid(&uid_str)?),
media_id: MediaId(parse_uuid(&mid_str)?),
stars: u8::try_from(row.get::<_, i32>(3)?).unwrap_or(0),
review_text: row.get(4)?,
created_at: parse_datetime(&created_str),
})
})?
.filter_map(std::result::Result::ok)
.collect();
Ok(ratings)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("get_media_ratings timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("get_media_ratings: {e}"))
})?
}
async fn get_user_rating(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<Option<crate::social::Rating>> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let result = db
.query_row(
"SELECT id, user_id, media_id, stars, review_text, created_at FROM \
ratings WHERE user_id = ? AND media_id = ?",
params![&user_id_str, &media_id_str],
|row| {
let id_str: String = row.get(0)?;
let uid_str: String = row.get(1)?;
let mid_str: String = row.get(2)?;
let created_str: String = row.get(5)?;
Ok(crate::social::Rating {
id: parse_uuid(&id_str)?,
user_id: crate::users::UserId(parse_uuid(&uid_str)?),
media_id: MediaId(parse_uuid(&mid_str)?),
stars: u8::try_from(row.get::<_, i32>(3)?).unwrap_or(0),
review_text: row.get(4)?,
created_at: parse_datetime(&created_str),
})
},
)
.optional()?;
Ok(result)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("get_user_rating timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("get_user_rating: {e}"))
})?
}
async fn delete_rating(&self, id: Uuid) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("DELETE FROM ratings WHERE id = ?", [&id_str])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("delete_rating timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_rating: {e}"))
})?
}
async fn add_comment(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
text: &str,
parent_id: Option<Uuid>,
) -> Result<crate::social::Comment> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let text = text.to_string();
let parent_str = parent_id.map(|p| p.to_string());
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let id = Uuid::now_v7();
let id_str = id.to_string();
let now = chrono::Utc::now();
db.execute(
"INSERT INTO comments (id, user_id, media_id, parent_comment_id, \
text, created_at) VALUES (?, ?, ?, ?, ?, ?)",
params![
&id_str,
&user_id_str,
&media_id_str,
&parent_str,
&text,
now.to_rfc3339()
],
)?;
Ok(crate::social::Comment {
id,
user_id,
media_id,
parent_comment_id: parent_id,
text,
created_at: now,
})
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("add_comment timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("add_comment: {e}"))
})?
}
async fn get_media_comments(
&self,
media_id: MediaId,
) -> Result<Vec<crate::social::Comment>> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let mut stmt = db.prepare(
"SELECT id, user_id, media_id, parent_comment_id, text, created_at \
FROM comments WHERE media_id = ? ORDER BY created_at ASC",
)?;
let comments = stmt
.query_map([&media_id_str], |row| {
let id_str: String = row.get(0)?;
let uid_str: String = row.get(1)?;
let mid_str: String = row.get(2)?;
let parent_str: Option<String> = row.get(3)?;
let created_str: String = row.get(5)?;
Ok(crate::social::Comment {
id: parse_uuid(&id_str)?,
user_id: crate::users::UserId(parse_uuid(&uid_str)?),
media_id: MediaId(parse_uuid(&mid_str)?),
parent_comment_id: parent_str
.and_then(|s| Uuid::parse_str(&s).ok()),
text: row.get(4)?,
created_at: parse_datetime(&created_str),
})
})?
.filter_map(std::result::Result::ok)
.collect();
Ok(comments)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("get_media_comments timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("get_media_comments: {e}"))
})?
}
async fn delete_comment(&self, id: Uuid) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("DELETE FROM comments WHERE id = ?", [&id_str])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("delete_comment timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_comment: {e}"))
})?
}
async fn add_favorite(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let now = chrono::Utc::now();
db.execute(
"INSERT OR IGNORE INTO favorites (user_id, media_id, created_at) \
VALUES (?, ?, ?)",
params![&user_id_str, &media_id_str, now.to_rfc3339()],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("add_favorite timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("add_favorite: {e}"))
})?
}
async fn remove_favorite(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"DELETE FROM favorites WHERE user_id = ? AND media_id = ?",
params![&user_id_str, &media_id_str],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("remove_favorite timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("remove_favorite: {e}"))
})?
}
  /// Return a page of a user's favorited media items, most recently
  /// favorited first.
  ///
  /// Rows are mapped via the shared `row_to_media_item` helper and then
  /// enriched with custom fields in one batch query. Rows that fail to
  /// decode are skipped.
  ///
  /// # Errors
  ///
  /// `PinakesError::Database` on lock/SQL/timeout/join failures.
  async fn get_user_favorites(
    &self,
    user_id: crate::users::UserId,
    pagination: &Pagination,
  ) -> Result<Vec<MediaItem>> {
    let conn = Arc::clone(&self.conn);
    let user_id_str = user_id.0.to_string();
    // SQLite binds integers as signed; convert the unsigned pagination
    // values accordingly.
    let limit = pagination.limit.cast_signed();
    let offset = pagination.offset.cast_signed();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      let mut stmt = db.prepare(
        "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \
         m.file_size, m.title, m.artist, m.album, m.genre, m.year, \
         m.duration_secs, m.description, m.thumbnail_path, m.created_at, \
         m.updated_at FROM media_items m JOIN favorites f ON m.id = \
         f.media_id WHERE f.user_id = ? ORDER BY f.created_at DESC LIMIT ? \
         OFFSET ?",
      )?;
      let mut items: Vec<MediaItem> = stmt
        .query_map(params![&user_id_str, limit, offset], row_to_media_item)?
        .filter_map(std::result::Result::ok)
        .collect();
      // Single batched lookup instead of one query per item.
      load_custom_fields_batch(&db, &mut items)?;
      Ok(items)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_user_favorites timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_user_favorites: {e}"))
      })?
  }
async fn is_favorite(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<bool> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let count: i64 = db.query_row(
"SELECT COUNT(*) FROM favorites WHERE user_id = ? AND media_id = ?",
params![&user_id_str, &media_id_str],
|row| row.get(0),
)?;
Ok(count > 0)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("is_favorite timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("is_favorite: {e}"))
})?
}
async fn create_share_link(
&self,
media_id: MediaId,
created_by: crate::users::UserId,
token: &str,
password_hash: Option<&str>,
expires_at: Option<chrono::DateTime<chrono::Utc>>,
) -> Result<crate::social::ShareLink> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
let created_by_str = created_by.0.to_string();
let token = token.to_string();
let password_hash = password_hash.map(String::from);
let expires_str = expires_at.map(|dt| dt.to_rfc3339());
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let id = Uuid::now_v7();
let id_str = id.to_string();
let now = chrono::Utc::now();
db.execute(
"INSERT INTO share_links (id, media_id, created_by, token, \
password_hash, expires_at, view_count, created_at) VALUES (?, ?, ?, \
?, ?, ?, 0, ?)",
params![
&id_str,
&media_id_str,
&created_by_str,
&token,
&password_hash,
&expires_str,
now.to_rfc3339()
],
)?;
Ok(crate::social::ShareLink {
id,
media_id,
created_by,
token,
password_hash,
expires_at,
view_count: 0,
created_at: now,
})
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("create_share_link timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("create_share_link: {e}"))
})?
}
  /// Resolve a share link by its public token.
  ///
  /// # Errors
  ///
  /// `PinakesError::NotFound` when no link has this token (mapped from
  /// rusqlite's `QueryReturnedNoRows`); `PinakesError::Database` for any
  /// other SQL/lock/timeout/join failure.
  async fn get_share_link(
    &self,
    token: &str,
  ) -> Result<crate::social::ShareLink> {
    let conn = Arc::clone(&self.conn);
    let token = token.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      db.query_row(
        "SELECT id, media_id, created_by, token, password_hash, expires_at, \
         view_count, created_at FROM share_links WHERE token = ?",
        [&token],
        |row| {
          let id_str: String = row.get(0)?;
          let mid_str: String = row.get(1)?;
          let uid_str: String = row.get(2)?;
          let expires_str: Option<String> = row.get(5)?;
          let created_str: String = row.get(7)?;
          Ok(crate::social::ShareLink {
            id: parse_uuid(&id_str)?,
            media_id: MediaId(parse_uuid(&mid_str)?),
            created_by: crate::users::UserId(parse_uuid(&uid_str)?),
            token: row.get(3)?,
            password_hash: row.get(4)?,
            expires_at: expires_str.map(|s| parse_datetime(&s)),
            // view_count is stored as a signed SQLite integer.
            view_count: row.get::<_, i64>(6)?.cast_unsigned(),
            created_at: parse_datetime(&created_str),
          })
        },
      )
      .map_err(|e| {
        // Distinguish "not found" from genuine database failures.
        match e {
          rusqlite::Error::QueryReturnedNoRows => {
            PinakesError::NotFound("share link not found".into())
          },
          _ => PinakesError::Database(e.to_string()),
        }
      })
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("get_share_link timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_share_link: {e}"))
      })?
  }
async fn increment_share_views(&self, token: &str) -> Result<()> {
let conn = Arc::clone(&self.conn);
let token = token.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"UPDATE share_links SET view_count = view_count + 1 WHERE token = ?",
[&token],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("increment_share_views timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("increment_share_views: {e}"))
})?
}
async fn delete_share_link(&self, id: Uuid) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("DELETE FROM share_links WHERE id = ?", [&id_str])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("delete_share_link timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_share_link: {e}"))
})?
}
async fn create_playlist(
&self,
owner_id: crate::users::UserId,
name: &str,
description: Option<&str>,
is_public: bool,
is_smart: bool,
filter_query: Option<&str>,
) -> Result<crate::playlists::Playlist> {
let conn = Arc::clone(&self.conn);
let owner_id_str = owner_id.0.to_string();
let name = name.to_string();
let description = description.map(String::from);
let filter_query = filter_query.map(String::from);
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let id = Uuid::now_v7();
let id_str = id.to_string();
let now = chrono::Utc::now();
db.execute(
"INSERT INTO playlists (id, owner_id, name, description, is_public, \
is_smart, filter_query, created_at, updated_at) VALUES (?, ?, ?, ?, \
?, ?, ?, ?, ?)",
params![
&id_str,
&owner_id_str,
&name,
&description,
i32::from(is_public),
i32::from(is_smart),
&filter_query,
now.to_rfc3339(),
now.to_rfc3339()
],
)?;
Ok(crate::playlists::Playlist {
id,
owner_id,
name,
description,
is_public,
is_smart,
filter_query,
created_at: now,
updated_at: now,
})
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("create_playlist timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("create_playlist: {e}"))
})?
}
  /// Fetch a playlist by id.
  ///
  /// # Errors
  ///
  /// `PinakesError::NotFound` when no playlist has this id (mapped from
  /// rusqlite's `QueryReturnedNoRows`); `PinakesError::Database` for any
  /// other SQL/lock/timeout/join failure.
  async fn get_playlist(&self, id: Uuid) -> Result<crate::playlists::Playlist> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      db.query_row(
        "SELECT id, owner_id, name, description, is_public, is_smart, \
         filter_query, created_at, updated_at FROM playlists WHERE id = ?",
        [&id_str],
        |row| {
          let id_str: String = row.get(0)?;
          let owner_str: String = row.get(1)?;
          let created_str: String = row.get(7)?;
          let updated_str: String = row.get(8)?;
          Ok(crate::playlists::Playlist {
            id: parse_uuid(&id_str)?,
            owner_id: crate::users::UserId(parse_uuid(&owner_str)?),
            name: row.get(2)?,
            description: row.get(3)?,
            // Booleans are persisted as 0/1 integers.
            is_public: row.get::<_, i32>(4)? != 0,
            is_smart: row.get::<_, i32>(5)? != 0,
            filter_query: row.get(6)?,
            created_at: parse_datetime(&created_str),
            updated_at: parse_datetime(&updated_str),
          })
        },
      )
      .map_err(|e| {
        // Distinguish "not found" from genuine database failures.
        match e {
          rusqlite::Error::QueryReturnedNoRows => {
            PinakesError::NotFound(format!("playlist {id}"))
          },
          _ => PinakesError::Database(e.to_string()),
        }
      })
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("get_playlist timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_playlist: {e}"))
      })?
  }
async fn list_playlists(
&self,
owner_id: Option<crate::users::UserId>,
) -> Result<Vec<crate::playlists::Playlist>> {
let conn = Arc::clone(&self.conn);
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let (sql, param): (String, Option<String>) = owner_id.map_or_else(
|| {
(
"SELECT id, owner_id, name, description, is_public, is_smart, \
filter_query, created_at, updated_at FROM playlists ORDER BY \
updated_at DESC"
.to_string(),
None,
)
},
|uid| {
(
"SELECT id, owner_id, name, description, is_public, is_smart, \
filter_query, created_at, updated_at FROM playlists WHERE \
owner_id = ? OR is_public = 1 ORDER BY updated_at DESC"
.to_string(),
Some(uid.0.to_string()),
)
},
);
let mut stmt = db.prepare(&sql)?;
let rows = if let Some(ref p) = param {
stmt
.query_map([p], |row| {
let id_str: String = row.get(0)?;
let owner_str: String = row.get(1)?;
let created_str: String = row.get(7)?;
let updated_str: String = row.get(8)?;
Ok(crate::playlists::Playlist {
id: parse_uuid(&id_str)?,
owner_id: crate::users::UserId(parse_uuid(&owner_str)?),
name: row.get(2)?,
description: row.get(3)?,
is_public: row.get::<_, i32>(4)? != 0,
is_smart: row.get::<_, i32>(5)? != 0,
filter_query: row.get(6)?,
created_at: parse_datetime(&created_str),
updated_at: parse_datetime(&updated_str),
})
})?
.filter_map(std::result::Result::ok)
.collect()
} else {
stmt
.query_map([], |row| {
let id_str: String = row.get(0)?;
let owner_str: String = row.get(1)?;
let created_str: String = row.get(7)?;
let updated_str: String = row.get(8)?;
Ok(crate::playlists::Playlist {
id: parse_uuid(&id_str)?,
owner_id: crate::users::UserId(parse_uuid(&owner_str)?),
name: row.get(2)?,
description: row.get(3)?,
is_public: row.get::<_, i32>(4)? != 0,
is_smart: row.get::<_, i32>(5)? != 0,
filter_query: row.get(6)?,
created_at: parse_datetime(&created_str),
updated_at: parse_datetime(&updated_str),
})
})?
.filter_map(std::result::Result::ok)
.collect()
};
Ok(rows)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("list_playlists timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("list_playlists: {e}"))
})?
}
  /// Partially update a playlist; only the supplied fields change.
  ///
  /// `updated_at` is always bumped, even when no other field is provided.
  /// Returns the playlist as stored after the update.
  ///
  /// # Errors
  ///
  /// Returns [`PinakesError::NotFound`] when no playlist row matches `id`,
  /// and [`PinakesError::Database`] on other failures or timeout.
  async fn update_playlist(
    &self,
    id: Uuid,
    name: Option<&str>,
    description: Option<&str>,
    is_public: Option<bool>,
  ) -> Result<crate::playlists::Playlist> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.to_string();
    let name = name.map(String::from);
    let description = description.map(String::from);
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      let now = chrono::Utc::now();
      // Build the SET clause dynamically; boxed params share one
      // heterogeneous Vec and must be pushed in the same order as their
      // `?` placeholders are appended.
      let mut updates = vec!["updated_at = ?".to_string()];
      let mut sql_params: Vec<Box<dyn rusqlite::ToSql>> =
        vec![Box::new(now.to_rfc3339())];
      if let Some(ref n) = name {
        updates.push("name = ?".to_string());
        sql_params.push(Box::new(n.clone()));
      }
      if let Some(ref d) = description {
        updates.push("description = ?".to_string());
        sql_params.push(Box::new(d.clone()));
      }
      if let Some(p) = is_public {
        updates.push("is_public = ?".to_string());
        sql_params.push(Box::new(i32::from(p)));
      }
      // The trailing parameter binds the `WHERE id = ?` placeholder.
      sql_params.push(Box::new(id_str.clone()));
      let sql =
        format!("UPDATE playlists SET {} WHERE id = ?", updates.join(", "));
      let param_refs: Vec<&dyn rusqlite::ToSql> =
        sql_params.iter().map(std::convert::AsRef::as_ref).collect();
      db.execute(&sql, param_refs.as_slice())?;
      // Fetch updated
      db.query_row(
        "SELECT id, owner_id, name, description, is_public, is_smart, \
         filter_query, created_at, updated_at FROM playlists WHERE id = ?",
        [&id_str],
        |row| {
          let id_str: String = row.get(0)?;
          let owner_str: String = row.get(1)?;
          let created_str: String = row.get(7)?;
          let updated_str: String = row.get(8)?;
          Ok(crate::playlists::Playlist {
            id: parse_uuid(&id_str)?,
            owner_id: crate::users::UserId(parse_uuid(&owner_str)?),
            name: row.get(2)?,
            description: row.get(3)?,
            // Booleans are stored as 0/1 integers in SQLite.
            is_public: row.get::<_, i32>(4)? != 0,
            is_smart: row.get::<_, i32>(5)? != 0,
            filter_query: row.get(6)?,
            created_at: parse_datetime(&created_str),
            updated_at: parse_datetime(&updated_str),
          })
        },
      )
      .map_err(|e| {
        // An UPDATE on a missing id affects zero rows; the re-read surfaces
        // that as NotFound.
        match e {
          rusqlite::Error::QueryReturnedNoRows => {
            PinakesError::NotFound(format!("playlist {id_str}"))
          },
          _ => PinakesError::Database(e.to_string()),
        }
      })
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("update_playlist timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("update_playlist: {e}"))
      })?
  }
async fn delete_playlist(&self, id: Uuid) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("DELETE FROM playlists WHERE id = ?", [&id_str])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("delete_playlist timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_playlist: {e}"))
})?
}
async fn add_to_playlist(
&self,
playlist_id: Uuid,
media_id: MediaId,
position: i32,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let playlist_id_str = playlist_id.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let now = chrono::Utc::now();
db.execute(
"INSERT OR REPLACE INTO playlist_items (playlist_id, media_id, \
position, added_at) VALUES (?, ?, ?, ?)",
params![&playlist_id_str, &media_id_str, position, now.to_rfc3339()],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("add_to_playlist timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("add_to_playlist: {e}"))
})?
}
async fn remove_from_playlist(
&self,
playlist_id: Uuid,
media_id: MediaId,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let playlist_id_str = playlist_id.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"DELETE FROM playlist_items WHERE playlist_id = ? AND media_id = ?",
params![&playlist_id_str, &media_id_str],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("remove_from_playlist timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("remove_from_playlist: {e}"))
})?
}
  /// Return all media items in a playlist, ordered by stored position.
  ///
  /// Custom fields are batch-loaded for every returned item. Rows that fail
  /// to map are skipped rather than failing the whole call.
  async fn get_playlist_items(
    &self,
    playlist_id: Uuid,
  ) -> Result<Vec<MediaItem>> {
    let conn = Arc::clone(&self.conn);
    let playlist_id_str = playlist_id.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      // Column list must stay in sync with `row_to_media_item`.
      let mut stmt = db.prepare(
        "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \
         m.file_size, m.title, m.artist, m.album, m.genre, m.year, \
         m.duration_secs, m.description, m.thumbnail_path, m.created_at, \
         m.updated_at FROM media_items m JOIN playlist_items pi ON m.id = \
         pi.media_id WHERE pi.playlist_id = ? ORDER BY pi.position ASC",
      )?;
      let mut items: Vec<MediaItem> = stmt
        .query_map([&playlist_id_str], row_to_media_item)?
        .filter_map(std::result::Result::ok)
        .collect();
      load_custom_fields_batch(&db, &mut items)?;
      Ok(items)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_playlist_items timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_playlist_items: {e}"))
      })?
  }
async fn reorder_playlist(
&self,
playlist_id: Uuid,
media_id: MediaId,
new_position: i32,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let playlist_id_str = playlist_id.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"UPDATE playlist_items SET position = ? WHERE playlist_id = ? AND \
media_id = ?",
params![new_position, &playlist_id_str, &media_id_str],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("reorder_playlist timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("reorder_playlist: {e}"))
})?
}
async fn record_usage_event(
&self,
event: &crate::analytics::UsageEvent,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = event.id.to_string();
let media_id_str = event.media_id.map(|m| m.0.to_string());
let user_id_str = event.user_id.map(|u| u.0.to_string());
let event_type = event.event_type.to_string();
let ts = event.timestamp.to_rfc3339();
let duration = event.duration_secs;
let context = event.context_json.clone();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"INSERT INTO usage_events (id, media_id, user_id, event_type, \
timestamp, duration_secs, context_json) VALUES (?, ?, ?, ?, ?, ?, ?)",
params![
&id_str,
&media_id_str,
&user_id_str,
&event_type,
&ts,
&duration,
&context
],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("record_usage_event timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("record_usage_event: {e}"))
})?
}
  /// Fetch usage events, optionally filtered by media and/or user, newest
  /// first, capped at `limit` rows.
  ///
  /// Unparseable media/user UUIDs degrade to `None`; unknown event-type
  /// strings fall back to `UsageEventType::View`. Rows that fail to map
  /// are skipped.
  async fn get_usage_events(
    &self,
    media_id: Option<MediaId>,
    user_id: Option<crate::users::UserId>,
    limit: u64,
  ) -> Result<Vec<crate::analytics::UsageEvent>> {
    let conn = Arc::clone(&self.conn);
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      // Assemble optional WHERE filters; boxed params are pushed in the
      // same order their `?` placeholders appear in the final SQL.
      let mut conditions = Vec::new();
      let mut sql_params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
      if let Some(mid) = media_id {
        conditions.push("media_id = ?".to_string());
        sql_params.push(Box::new(mid.0.to_string()));
      }
      if let Some(uid) = user_id {
        conditions.push("user_id = ?".to_string());
        sql_params.push(Box::new(uid.0.to_string()));
      }
      let where_clause = if conditions.is_empty() {
        String::new()
      } else {
        format!("WHERE {}", conditions.join(" AND "))
      };
      // LIMIT is bound last, matching the trailing `?` below.
      sql_params.push(Box::new(limit.cast_signed()));
      let sql = format!(
        "SELECT id, media_id, user_id, event_type, timestamp, duration_secs, \
         context_json FROM usage_events {where_clause} ORDER BY timestamp \
         DESC LIMIT ?"
      );
      let mut stmt = db.prepare(&sql)?;
      let param_refs: Vec<&dyn rusqlite::ToSql> =
        sql_params.iter().map(std::convert::AsRef::as_ref).collect();
      let events = stmt
        .query_map(param_refs.as_slice(), |row| {
          let id_str: String = row.get(0)?;
          let mid_str: Option<String> = row.get(1)?;
          let uid_str: Option<String> = row.get(2)?;
          let event_type_str: String = row.get(3)?;
          let ts_str: String = row.get(4)?;
          Ok(crate::analytics::UsageEvent {
            id: parse_uuid(&id_str)?,
            // Corrupt foreign-key UUIDs become None rather than erroring.
            media_id: mid_str
              .and_then(|s| Uuid::parse_str(&s).ok())
              .map(MediaId),
            user_id: uid_str
              .and_then(|s| Uuid::parse_str(&s).ok())
              .map(crate::users::UserId),
            event_type: event_type_str
              .parse()
              .unwrap_or(crate::analytics::UsageEventType::View),
            timestamp: parse_datetime(&ts_str),
            duration_secs: row.get(5)?,
            context_json: row.get(6)?,
          })
        })?
        .filter_map(std::result::Result::ok)
        .collect();
      Ok(events)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("get_usage_events timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_usage_events: {e}"))
      })?
  }
async fn get_most_viewed(&self, limit: u64) -> Result<Vec<(MediaItem, u64)>> {
let conn = Arc::clone(&self.conn);
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let mut stmt = db.prepare(
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \
m.file_size, m.title, m.artist, m.album, m.genre, m.year, \
m.duration_secs, m.description, m.thumbnail_path, m.created_at, \
m.updated_at, COUNT(ue.id) as view_count FROM media_items m JOIN \
usage_events ue ON m.id = ue.media_id WHERE ue.event_type IN \
('view', 'play') GROUP BY m.id ORDER BY view_count DESC LIMIT ?",
)?;
let mut items: Vec<(MediaItem, u64)> = stmt
.query_map([limit.cast_signed()], |row| {
let item = row_to_media_item(row)?;
let count: i64 = row.get(16)?;
Ok((item, count.cast_unsigned()))
})?
.filter_map(std::result::Result::ok)
.collect();
// Load custom fields for each item
let mut media_items: Vec<MediaItem> =
items.iter().map(|(i, _)| i.clone()).collect();
load_custom_fields_batch(&db, &mut media_items)?;
for (i, (item, _)) in items.iter_mut().enumerate() {
item.custom_fields = std::mem::take(&mut media_items[i].custom_fields);
}
Ok(items)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("get_most_viewed timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("get_most_viewed: {e}"))
})?
}
  /// Return the media items a user viewed or played most recently.
  ///
  /// Items are deduplicated via GROUP BY and ordered by their latest
  /// 'view'/'play' event timestamp, newest first, capped at `limit`.
  /// Custom fields are batch-loaded for the returned items.
  async fn get_recently_viewed(
    &self,
    user_id: crate::users::UserId,
    limit: u64,
  ) -> Result<Vec<MediaItem>> {
    let conn = Arc::clone(&self.conn);
    let user_id_str = user_id.0.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      // Column list must stay in sync with `row_to_media_item`.
      let mut stmt = db.prepare(
        "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \
         m.file_size, m.title, m.artist, m.album, m.genre, m.year, \
         m.duration_secs, m.description, m.thumbnail_path, m.created_at, \
         m.updated_at FROM media_items m JOIN usage_events ue ON m.id = \
         ue.media_id WHERE ue.user_id = ? AND ue.event_type IN ('view', \
         'play') GROUP BY m.id ORDER BY MAX(ue.timestamp) DESC LIMIT ?",
      )?;
      let mut items: Vec<MediaItem> = stmt
        .query_map(
          params![&user_id_str, limit.cast_signed()],
          row_to_media_item,
        )?
        .filter_map(std::result::Result::ok)
        .collect();
      load_custom_fields_batch(&db, &mut items)?;
      Ok(items)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_recently_viewed timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_recently_viewed: {e}"))
      })?
  }
async fn update_watch_progress(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
progress_secs: f64,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let id = Uuid::now_v7().to_string();
let now = chrono::Utc::now();
db.execute(
"INSERT INTO watch_history (id, user_id, media_id, progress_secs, \
last_watched) VALUES (?, ?, ?, ?, ?) ON CONFLICT(user_id, media_id) \
DO UPDATE SET progress_secs = excluded.progress_secs, last_watched = \
excluded.last_watched",
params![
&id,
&user_id_str,
&media_id_str,
progress_secs,
now.to_rfc3339()
],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("update_watch_progress timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("update_watch_progress: {e}"))
})?
}
async fn get_watch_progress(
&self,
user_id: crate::users::UserId,
media_id: MediaId,
) -> Result<Option<f64>> {
let conn = Arc::clone(&self.conn);
let user_id_str = user_id.0.to_string();
let media_id_str = media_id.0.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let result = db
.query_row(
"SELECT progress_secs FROM watch_history WHERE user_id = ? AND \
media_id = ?",
params![&user_id_str, &media_id_str],
|row| row.get(0),
)
.optional()?;
Ok(result)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("get_watch_progress timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("get_watch_progress: {e}"))
})?
}
async fn cleanup_old_events(
&self,
before: chrono::DateTime<chrono::Utc>,
) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let before_str = before.to_rfc3339();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let affected = db
.execute("DELETE FROM usage_events WHERE timestamp < ?", [
&before_str,
])?;
Ok(affected as u64)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("cleanup_old_events timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("cleanup_old_events: {e}"))
})?
}
  /// Insert a subtitle record (external file or embedded track) for a
  /// media item.
  async fn add_subtitle(
    &self,
    subtitle: &crate::subtitles::Subtitle,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    // Extract every column up front so the blocking closure is 'static.
    let id_str = subtitle.id.to_string();
    let media_id_str = subtitle.media_id.0.to_string();
    let language = subtitle.language.clone();
    let format = subtitle.format.to_string();
    // Paths are stored as lossy UTF-8 strings.
    let file_path = subtitle
      .file_path
      .as_ref()
      .map(|p| p.to_string_lossy().to_string());
    let is_embedded = subtitle.is_embedded;
    let track_index = subtitle.track_index.map(i64::from);
    let offset_ms = subtitle.offset_ms;
    let now = subtitle.created_at.to_rfc3339();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      db.execute(
        "INSERT INTO subtitles (id, media_id, language, format, file_path, \
         is_embedded, track_index, offset_ms, created_at) VALUES (?, ?, ?, ?, \
         ?, ?, ?, ?, ?)",
        params![
          &id_str,
          &media_id_str,
          &language,
          &format,
          &file_path,
          i32::from(is_embedded),
          &track_index,
          offset_ms,
          &now
        ],
      )?;
      Ok(())
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("add_subtitle timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("add_subtitle: {e}"))
      })?
  }
  /// List all subtitle records attached to a media item.
  ///
  /// Unknown format strings fall back to SRT; out-of-range track indices
  /// become `None`. Rows that fail to map are skipped.
  async fn get_media_subtitles(
    &self,
    media_id: MediaId,
  ) -> Result<Vec<crate::subtitles::Subtitle>> {
    let conn = Arc::clone(&self.conn);
    let media_id_str = media_id.0.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      let mut stmt = db.prepare(
        "SELECT id, media_id, language, format, file_path, is_embedded, \
         track_index, offset_ms, created_at FROM subtitles WHERE media_id = ?",
      )?;
      let subtitles = stmt
        .query_map([&media_id_str], |row| {
          let id_str: String = row.get(0)?;
          let mid_str: String = row.get(1)?;
          let format_str: String = row.get(3)?;
          let created_str: String = row.get(8)?;
          Ok(crate::subtitles::Subtitle {
            id: parse_uuid(&id_str)?,
            media_id: MediaId(parse_uuid(&mid_str)?),
            language: row.get(2)?,
            // Unrecognized stored format strings default to SRT.
            format: format_str
              .parse()
              .unwrap_or(crate::subtitles::SubtitleFormat::Srt),
            file_path: row
              .get::<_, Option<String>>(4)?
              .map(std::path::PathBuf::from),
            is_embedded: row.get::<_, i32>(5)? != 0,
            // Stored as i64; values outside u32 range degrade to None.
            track_index: row
              .get::<_, Option<i64>>(6)?
              .and_then(|i| u32::try_from(i).ok()),
            offset_ms: row.get(7)?,
            created_at: parse_datetime(&created_str),
          })
        })?
        .filter_map(std::result::Result::ok)
        .collect();
      Ok(subtitles)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_media_subtitles timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_media_subtitles: {e}"))
      })?
  }
async fn delete_subtitle(&self, id: Uuid) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("DELETE FROM subtitles WHERE id = ?", [&id_str])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("delete_subtitle timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_subtitle: {e}"))
})?
}
async fn update_subtitle_offset(
&self,
id: Uuid,
offset_ms: i64,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("UPDATE subtitles SET offset_ms = ? WHERE id = ?", params![
offset_ms, &id_str
])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("update_subtitle_offset timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("update_subtitle_offset: {e}"))
})?
}
  /// Upsert a cached record of metadata fetched from an external provider.
  ///
  /// Uses `INSERT OR REPLACE`, so re-storing a record with the same id
  /// overwrites the previous row.
  async fn store_external_metadata(
    &self,
    meta: &crate::enrichment::ExternalMetadata,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    // Extract every column up front so the blocking closure is 'static.
    let id_str = meta.id.to_string();
    let media_id_str = meta.media_id.0.to_string();
    let source = meta.source.to_string();
    let external_id = meta.external_id.clone();
    let metadata_json = meta.metadata_json.clone();
    let confidence = meta.confidence;
    let last_updated = meta.last_updated.to_rfc3339();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      db.execute(
        "INSERT OR REPLACE INTO external_metadata (id, media_id, source, \
         external_id, metadata_json, confidence, last_updated) VALUES (?, ?, \
         ?, ?, ?, ?, ?)",
        params![
          &id_str,
          &media_id_str,
          &source,
          &external_id,
          &metadata_json,
          confidence,
          &last_updated
        ],
      )?;
      Ok(())
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("store_external_metadata timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("store_external_metadata: {e}"))
      })?
  }
  /// List all cached external-metadata records for a media item.
  ///
  /// Unknown source strings fall back to `MusicBrainz`; rows that fail to
  /// map are skipped.
  async fn get_external_metadata(
    &self,
    media_id: MediaId,
  ) -> Result<Vec<crate::enrichment::ExternalMetadata>> {
    let conn = Arc::clone(&self.conn);
    let media_id_str = media_id.0.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      let mut stmt = db.prepare(
        "SELECT id, media_id, source, external_id, metadata_json, confidence, \
         last_updated FROM external_metadata WHERE media_id = ?",
      )?;
      let metas = stmt
        .query_map([&media_id_str], |row| {
          let id_str: String = row.get(0)?;
          let mid_str: String = row.get(1)?;
          let source_str: String = row.get(2)?;
          let updated_str: String = row.get(6)?;
          Ok(crate::enrichment::ExternalMetadata {
            id: parse_uuid(&id_str)?,
            media_id: MediaId(parse_uuid(&mid_str)?),
            // Unrecognized stored source strings default to MusicBrainz.
            source: source_str
              .parse()
              .unwrap_or(crate::enrichment::EnrichmentSourceType::MusicBrainz),
            external_id: row.get(3)?,
            metadata_json: row.get(4)?,
            confidence: row.get(5)?,
            last_updated: parse_datetime(&updated_str),
          })
        })?
        .filter_map(std::result::Result::ok)
        .collect();
      Ok(metas)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_external_metadata timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_external_metadata: {e}"))
      })?
  }
async fn delete_external_metadata(&self, id: Uuid) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute("DELETE FROM external_metadata WHERE id = ?", [&id_str])?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("delete_external_metadata timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("delete_external_metadata: {e}"))
})?
}
  /// Persist a new transcode session row.
  ///
  /// Only the database-backed fields are stored; the session's runtime
  /// state (`duration_secs`, `child_cancel`) is not persisted.
  async fn create_transcode_session(
    &self,
    session: &crate::transcode::TranscodeSession,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    // Extract every column up front so the blocking closure is 'static.
    let id_str = session.id.to_string();
    let media_id_str = session.media_id.0.to_string();
    let user_id_str = session.user_id.map(|u| u.0.to_string());
    let profile = session.profile.clone();
    let cache_path = session.cache_path.to_string_lossy().to_string();
    // Status and its optional error message occupy separate columns.
    let status = session.status.as_str().to_string();
    let progress = session.progress;
    let error_message = session.status.error_message().map(String::from);
    let created_at = session.created_at.to_rfc3339();
    let expires_at = session.expires_at.map(|dt| dt.to_rfc3339());
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      db.execute(
        "INSERT INTO transcode_sessions (id, media_id, user_id, profile, \
         cache_path, status, progress, error_message, created_at, expires_at) \
         VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        params![
          &id_str,
          &media_id_str,
          &user_id_str,
          &profile,
          &cache_path,
          &status,
          progress,
          &error_message,
          &created_at,
          &expires_at
        ],
      )?;
      Ok(())
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("create_transcode_session timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("create_transcode_session: {e}"))
      })?
  }
  /// Fetch a transcode session by id.
  ///
  /// Runtime-only fields (`duration_secs`, `child_cancel`) are initialized
  /// to `None` on load; they are not stored in the database.
  ///
  /// # Errors
  ///
  /// Returns [`PinakesError::NotFound`] when no session row matches `id`,
  /// and [`PinakesError::Database`] on other failures or timeout.
  async fn get_transcode_session(
    &self,
    id: Uuid,
  ) -> Result<crate::transcode::TranscodeSession> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.to_string();
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      db.query_row(
        "SELECT id, media_id, user_id, profile, cache_path, status, progress, \
         error_message, created_at, expires_at FROM transcode_sessions WHERE \
         id = ?",
        [&id_str],
        |row| {
          // Column indices follow the SELECT list above.
          let id_str: String = row.get(0)?;
          let mid_str: String = row.get(1)?;
          let uid_str: Option<String> = row.get(2)?;
          let status_str: String = row.get(5)?;
          let error_msg: Option<String> = row.get(7)?;
          let created_str: String = row.get(8)?;
          let expires_str: Option<String> = row.get(9)?;
          Ok(crate::transcode::TranscodeSession {
            id: parse_uuid(&id_str)?,
            media_id: MediaId(parse_uuid(&mid_str)?),
            // A corrupt user UUID degrades to None rather than erroring.
            user_id: uid_str
              .and_then(|s| Uuid::parse_str(&s).ok())
              .map(crate::users::UserId),
            profile: row.get(3)?,
            cache_path: std::path::PathBuf::from(row.get::<_, String>(4)?),
            // Status string and optional error message are recombined into
            // the enum by `from_db`.
            status: crate::transcode::TranscodeStatus::from_db(
              &status_str,
              error_msg.as_deref(),
            ),
            progress: row.get(6)?,
            created_at: parse_datetime(&created_str),
            expires_at: expires_str.map(|s| parse_datetime(&s)),
            duration_secs: None,
            child_cancel: None,
          })
        },
      )
      .map_err(|e| {
        match e {
          rusqlite::Error::QueryReturnedNoRows => {
            PinakesError::NotFound(format!("transcode session {id}"))
          },
          _ => PinakesError::Database(e.to_string()),
        }
      })
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_transcode_session timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_transcode_session: {e}"))
      })?
  }
  /// List transcode sessions, optionally restricted to one media item,
  /// newest first.
  ///
  /// Runtime-only fields (`duration_secs`, `child_cancel`) are `None` on
  /// load. Rows that fail to map are skipped.
  async fn list_transcode_sessions(
    &self,
    media_id: Option<MediaId>,
  ) -> Result<Vec<crate::transcode::TranscodeSession>> {
    let conn = Arc::clone(&self.conn);
    let fut = tokio::task::spawn_blocking(move || {
      let db = conn.lock().map_err(|e| {
        PinakesError::Database(format!("failed to acquire database lock: {e}"))
      })?;
      // Pick the filtered or unfiltered query; `param` carries the single
      // optional bind value.
      let (sql, param) = media_id.map_or_else(
        || {
          (
            "SELECT id, media_id, user_id, profile, cache_path, status, \
             progress, error_message, created_at, expires_at FROM \
             transcode_sessions ORDER BY created_at DESC"
              .to_string(),
            None,
          )
        },
        |mid| {
          (
            "SELECT id, media_id, user_id, profile, cache_path, status, \
             progress, error_message, created_at, expires_at FROM \
             transcode_sessions WHERE media_id = ? ORDER BY created_at DESC"
              .to_string(),
            Some(mid.0.to_string()),
          )
        },
      );
      let mut stmt = db.prepare(&sql)?;
      // Row mapper shared by both query shapes.
      let parse_row =
        |row: &Row| -> rusqlite::Result<crate::transcode::TranscodeSession> {
          let id_str: String = row.get(0)?;
          let mid_str: String = row.get(1)?;
          let uid_str: Option<String> = row.get(2)?;
          let status_str: String = row.get(5)?;
          let error_msg: Option<String> = row.get(7)?;
          let created_str: String = row.get(8)?;
          let expires_str: Option<String> = row.get(9)?;
          Ok(crate::transcode::TranscodeSession {
            id: parse_uuid(&id_str)?,
            media_id: MediaId(parse_uuid(&mid_str)?),
            user_id: uid_str
              .and_then(|s| Uuid::parse_str(&s).ok())
              .map(crate::users::UserId),
            profile: row.get(3)?,
            cache_path: std::path::PathBuf::from(row.get::<_, String>(4)?),
            status: crate::transcode::TranscodeStatus::from_db(
              &status_str,
              error_msg.as_deref(),
            ),
            progress: row.get(6)?,
            created_at: parse_datetime(&created_str),
            expires_at: expires_str.map(|s| parse_datetime(&s)),
            duration_secs: None,
            child_cancel: None,
          })
        };
      let sessions: Vec<_> = if let Some(ref p) = param {
        stmt
          .query_map([p], parse_row)?
          .filter_map(std::result::Result::ok)
          .collect()
      } else {
        stmt
          .query_map([], parse_row)?
          .filter_map(std::result::Result::ok)
          .collect()
      };
      Ok(sessions)
    });
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("list_transcode_sessions timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("list_transcode_sessions: {e}"))
      })?
  }
async fn update_transcode_status(
&self,
id: Uuid,
status: crate::transcode::TranscodeStatus,
progress: f32,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.to_string();
let status_str = status.as_str().to_string();
let error_message = status.error_message().map(String::from);
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
db.execute(
"UPDATE transcode_sessions SET status = ?, progress = ?, \
error_message = ? WHERE id = ?",
params![&status_str, progress, &error_message, &id_str],
)?;
Ok(())
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("update_transcode_status timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("update_transcode_status: {e}"))
})?
}
async fn cleanup_expired_transcodes(
  &self,
  before: chrono::DateTime<chrono::Utc>,
) -> Result<u64> {
  let conn = Arc::clone(&self.conn);
  let cutoff = before.to_rfc3339();
  let task = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    // Rows with a NULL expiry are permanent and must never be swept.
    let removed = db.execute(
      "DELETE FROM transcode_sessions WHERE expires_at IS NOT NULL AND \
       expires_at < ?",
      [&cutoff],
    )?;
    Ok(removed as u64)
  });
  match tokio::time::timeout(std::time::Duration::from_secs(10), task).await {
    Err(_) => Err(PinakesError::Database(
      "cleanup_expired_transcodes timed out".into(),
    )),
    Ok(Err(e)) => Err(PinakesError::Database(format!(
      "cleanup_expired_transcodes: {e}"
    ))),
    Ok(Ok(inner)) => inner,
  }
}
async fn create_session(
  &self,
  session: &crate::storage::SessionData,
) -> Result<()> {
  let conn = Arc::clone(&self.conn);
  // Serialize the session up front; the blocking task owns plain strings.
  let row = (
    session.session_token.clone(),
    session.user_id.clone(),
    session.username.clone(),
    session.role.clone(),
    session.created_at.to_rfc3339(),
    session.expires_at.to_rfc3339(),
    session.last_accessed.to_rfc3339(),
  );
  let task = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    db.execute(
      "INSERT INTO sessions (session_token, user_id, username, role, \
       created_at, expires_at, last_accessed)
       VALUES (?, ?, ?, ?, ?, ?, ?)",
      params![row.0, row.1, row.2, row.3, row.4, row.5, row.6],
    )?;
    Ok(())
  });
  match tokio::time::timeout(std::time::Duration::from_secs(10), task).await {
    Err(_) => Err(PinakesError::Database("create_session timed out".into())),
    Ok(Err(e)) => Err(PinakesError::Database(format!("create_session: {e}"))),
    Ok(Ok(inner)) => inner,
  }
}
async fn get_session(
&self,
session_token: &str,
) -> Result<Option<crate::storage::SessionData>> {
let conn = Arc::clone(&self.conn);
let token = session_token.to_string();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let result = db
.query_row(
"SELECT session_token, user_id, username, role, created_at, \
expires_at, last_accessed
FROM sessions WHERE session_token = ?",
[&token],
|row| {
let created_at_str: String = row.get(4)?;
let expires_at_str: String = row.get(5)?;
let last_accessed_str: String = row.get(6)?;
Ok(crate::storage::SessionData {
session_token: row.get(0)?,
user_id: row.get(1)?,
username: row.get(2)?,
role: row.get(3)?,
created_at: chrono::DateTime::parse_from_rfc3339(
&created_at_str,
)
.map_err(|e| {
rusqlite::Error::ToSqlConversionFailure(Box::new(e))
})?
.with_timezone(&chrono::Utc),
expires_at: chrono::DateTime::parse_from_rfc3339(
&expires_at_str,
)
.map_err(|e| {
rusqlite::Error::ToSqlConversionFailure(Box::new(e))
})?
.with_timezone(&chrono::Utc),
last_accessed: chrono::DateTime::parse_from_rfc3339(
&last_accessed_str,
)
.map_err(|e| {
rusqlite::Error::ToSqlConversionFailure(Box::new(e))
})?
.with_timezone(&chrono::Utc),
})
},
)
.optional()?;
Ok(result)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("get_session timed out".into()))?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("get_session: {e}"))
})?
}
async fn touch_session(&self, session_token: &str) -> Result<()> {
  let conn = Arc::clone(&self.conn);
  let token = session_token.to_string();
  let stamp = chrono::Utc::now().to_rfc3339();
  let task = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    // An unknown token simply matches no rows; that is not an error here.
    db.execute(
      "UPDATE sessions SET last_accessed = ? WHERE session_token = ?",
      params![&stamp, &token],
    )?;
    Ok(())
  });
  match tokio::time::timeout(std::time::Duration::from_secs(10), task).await {
    Err(_) => Err(PinakesError::Database("touch_session timed out".into())),
    Ok(Err(e)) => Err(PinakesError::Database(format!("touch_session: {e}"))),
    Ok(Ok(inner)) => inner,
  }
}
/// Push a session's expiry forward, but only if it has not already
/// expired. Returns the new expiry on success, `None` if the token was
/// unknown or the session had already lapsed.
async fn extend_session(
  &self,
  session_token: &str,
  new_expires_at: chrono::DateTime<chrono::Utc>,
) -> Result<Option<chrono::DateTime<chrono::Utc>>> {
  let conn = Arc::clone(&self.conn);
  let token = session_token.to_string();
  let expires = new_expires_at.to_rfc3339();
  let now = chrono::Utc::now().to_rfc3339();
  let fut = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    // The expiry guard binds the RFC 3339 `now` string instead of
    // SQLite's `datetime('now')`: `expires_at` is stored in RFC 3339
    // form ("…T…+00:00"), and comparing that lexicographically against
    // SQLite's "YYYY-MM-DD HH:MM:SS" format is wrong -- the 'T'
    // separator sorts above ' ', so a same-day expired session would
    // compare as unexpired. Every other session query in this file
    // compares RFC 3339 against RFC 3339.
    let rows = db.execute(
      "UPDATE sessions SET expires_at = ?, last_accessed = ? WHERE \
       session_token = ? AND expires_at > ?",
      params![&expires, &now, &token, &now],
    )?;
    if rows > 0 {
      Ok(Some(new_expires_at))
    } else {
      Ok(None)
    }
  });
  tokio::time::timeout(std::time::Duration::from_secs(10), fut)
    .await
    .map_err(|_| PinakesError::Database("extend_session timed out".into()))?
    .map_err(|e: tokio::task::JoinError| {
      PinakesError::Database(format!("extend_session: {e}"))
    })?
}
async fn delete_session(&self, session_token: &str) -> Result<()> {
  let conn = Arc::clone(&self.conn);
  let token = session_token.to_string();
  let task = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    // Idempotent: deleting an unknown token matches zero rows and succeeds.
    db.execute("DELETE FROM sessions WHERE session_token = ?", [&token])?;
    Ok(())
  });
  match tokio::time::timeout(std::time::Duration::from_secs(10), task).await {
    Err(_) => Err(PinakesError::Database("delete_session timed out".into())),
    Ok(Err(e)) => Err(PinakesError::Database(format!("delete_session: {e}"))),
    Ok(Ok(inner)) => inner,
  }
}
async fn delete_user_sessions(&self, username: &str) -> Result<u64> {
  let conn = Arc::clone(&self.conn);
  let user = username.to_string();
  let task = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    // Revokes every session for the user, expired or not.
    let removed =
      db.execute("DELETE FROM sessions WHERE username = ?", [&user])?;
    Ok(removed as u64)
  });
  match tokio::time::timeout(std::time::Duration::from_secs(10), task).await {
    Err(_) => Err(PinakesError::Database(
      "delete_user_sessions timed out".into(),
    )),
    Ok(Err(e)) => Err(PinakesError::Database(format!(
      "delete_user_sessions: {e}"
    ))),
    Ok(Ok(inner)) => inner,
  }
}
async fn delete_expired_sessions(&self) -> Result<u64> {
  let conn = Arc::clone(&self.conn);
  let cutoff = chrono::Utc::now().to_rfc3339();
  let task = tokio::task::spawn_blocking(move || {
    let db = conn.lock().map_err(|e| {
      PinakesError::Database(format!("failed to acquire database lock: {e}"))
    })?;
    // Both sides of the comparison are RFC 3339 strings produced by
    // chrono, which order lexicographically the same as chronologically.
    let removed =
      db.execute("DELETE FROM sessions WHERE expires_at < ?", [&cutoff])?;
    Ok(removed as u64)
  });
  match tokio::time::timeout(std::time::Duration::from_secs(10), task).await {
    Err(_) => Err(PinakesError::Database(
      "delete_expired_sessions timed out".into(),
    )),
    Ok(Err(e)) => Err(PinakesError::Database(format!(
      "delete_expired_sessions: {e}"
    ))),
    Ok(Ok(inner)) => inner,
  }
}
async fn list_active_sessions(
&self,
username: Option<&str>,
) -> Result<Vec<crate::storage::SessionData>> {
let conn = Arc::clone(&self.conn);
let user_filter = username.map(std::string::ToString::to_string);
let now = chrono::Utc::now().to_rfc3339();
let fut = tokio::task::spawn_blocking(move || {
let db = conn.lock().map_err(|e| {
PinakesError::Database(format!("failed to acquire database lock: {e}"))
})?;
let (query, params): (&str, Vec<String>) = if let Some(user) = user_filter
{
(
"SELECT session_token, user_id, username, role, created_at, \
expires_at, last_accessed
FROM sessions WHERE expires_at > ? AND username = ?
ORDER BY last_accessed DESC",
vec![now, user],
)
} else {
(
"SELECT session_token, user_id, username, role, created_at, \
expires_at, last_accessed
FROM sessions WHERE expires_at > ?
ORDER BY last_accessed DESC",
vec![now],
)
};
let mut stmt = db.prepare(query)?;
let param_refs: Vec<&dyn rusqlite::ToSql> =
params.iter().map(|p| p as &dyn rusqlite::ToSql).collect();
let rows = stmt.query_map(&param_refs[..], |row| {
let created_at_str: String = row.get(4)?;
let expires_at_str: String = row.get(5)?;
let last_accessed_str: String = row.get(6)?;
Ok(crate::storage::SessionData {
session_token: row.get(0)?,
user_id: row.get(1)?,
username: row.get(2)?,
role: row.get(3)?,
created_at: chrono::DateTime::parse_from_rfc3339(&created_at_str)
.map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?
.with_timezone(&chrono::Utc),
expires_at: chrono::DateTime::parse_from_rfc3339(&expires_at_str)
.map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?
.with_timezone(&chrono::Utc),
last_accessed: chrono::DateTime::parse_from_rfc3339(
&last_accessed_str,
)
.map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?
.with_timezone(&chrono::Utc),
})
})?;
rows
.collect::<std::result::Result<Vec<_>, _>>()
.map_err(std::convert::Into::into)
});
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| {
PinakesError::Database("list_active_sessions timed out".into())
})?
.map_err(|e: tokio::task::JoinError| {
PinakesError::Database(format!("list_active_sessions: {e}"))
})?
}
// Book Management Methods
// Insert or refresh the book-specific metadata for a media item. The
// author and identifier lists are replaced wholesale (delete + re-insert)
// inside a single transaction, so a mid-way failure rolls back and leaves
// the previous state intact.
async fn upsert_book_metadata(
  &self,
  metadata: &crate::model::BookMetadata,
) -> Result<()> {
  // Guard against accidentally keying metadata on the nil UUID.
  if metadata.media_id.0.is_nil() {
    return Err(PinakesError::InvalidOperation(
      "upsert_book_metadata: media_id must not be nil".to_string(),
    ));
  }
  let conn = Arc::clone(&self.conn);
  // Clone every field up front: the blocking task must own its data.
  let media_id_str = metadata.media_id.to_string();
  let isbn = metadata.isbn.clone();
  let isbn13 = metadata.isbn13.clone();
  let publisher = metadata.publisher.clone();
  let language = metadata.language.clone();
  let page_count = metadata.page_count;
  let publication_date = metadata.publication_date.map(|d| d.to_string());
  let series_name = metadata.series_name.clone();
  let series_index = metadata.series_index;
  let format = metadata.format.clone();
  let authors = metadata.authors.clone();
  let identifiers = metadata.identifiers.clone();
  let fut = tokio::task::spawn_blocking(move || {
    // `transaction()` needs `&mut Connection`, hence the mutable lock.
    let mut conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    let tx = conn.transaction()?;
    // Upsert book_metadata. Note: on conflict, `updated_at` is refreshed
    // via datetime('now'), i.e. SQLite's "YYYY-MM-DD HH:MM:SS" format.
    tx.execute(
      "INSERT INTO book_metadata (
          media_id, isbn, isbn13, publisher, language, page_count,
          publication_date, series_name, series_index, format
      ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)
      ON CONFLICT(media_id) DO UPDATE SET
          isbn = ?2, isbn13 = ?3, publisher = ?4, language = ?5,
          page_count = ?6, publication_date = ?7, series_name = ?8,
          series_index = ?9, format = ?10, updated_at = \
       datetime('now')",
      rusqlite::params![
        media_id_str,
        isbn,
        isbn13,
        publisher,
        language,
        page_count,
        publication_date,
        series_name,
        series_index,
        format
      ],
    )?;
    // Clear existing authors and identifiers so the lists below fully
    // define the new state (no stale rows survive).
    tx.execute("DELETE FROM book_authors WHERE media_id = ?1", [
      &media_id_str,
    ])?;
    tx.execute("DELETE FROM book_identifiers WHERE media_id = ?1", [
      &media_id_str,
    ])?;
    // Insert authors, preserving their explicit ordering via `position`.
    for author in &authors {
      tx.execute(
        "INSERT INTO book_authors (media_id, author_name, author_sort, \
         role, position)
         VALUES (?1, ?2, ?3, ?4, ?5)",
        rusqlite::params![
          media_id_str,
          author.name,
          author.file_as,
          author.role,
          author.position
        ],
      )?;
    }
    // Insert identifiers: one row per (type, value) pair.
    for (id_type, values) in &identifiers {
      for value in values {
        tx.execute(
          "INSERT INTO book_identifiers (media_id, identifier_type, \
           identifier_value)
           VALUES (?1, ?2, ?3)",
          rusqlite::params![media_id_str, id_type, value],
        )?;
      }
    }
    tx.commit()?;
    Ok::<_, PinakesError>(())
  });
  // Longer (30s) budget than most methods: this runs several statements.
  tokio::time::timeout(std::time::Duration::from_secs(30), fut)
    .await
    .map_err(|_| {
      PinakesError::Database("upsert_book_metadata timed out".into())
    })?
    .map_err(|e: tokio::task::JoinError| {
      PinakesError::Database(format!("upsert_book_metadata: {e}"))
    })??;
  Ok(())
}
/// Load a book's full metadata record (base row + authors + identifiers).
/// Returns `Ok(None)` when no `book_metadata` row exists for the item.
async fn get_book_metadata(
  &self,
  media_id: MediaId,
) -> Result<Option<crate::model::BookMetadata>> {
  let conn = Arc::clone(&self.conn);
  let media_id_str = media_id.to_string();
  let fut = tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // Accept both RFC 3339 and SQLite's "YYYY-MM-DD HH:MM:SS" (UTC):
    // `upsert_book_metadata` sets `updated_at = datetime('now')`, which
    // stores the latter format, so parsing strictly as RFC 3339 would
    // fail for any row that has ever been updated.
    let parse_db_datetime = |s: &str| {
      chrono::DateTime::parse_from_rfc3339(s)
        .map(|dt| dt.with_timezone(&chrono::Utc))
        .or_else(|_| {
          chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
            .map(|naive| naive.and_utc())
        })
        .map_err(|e| {
          PinakesError::Database(format!("invalid datetime in database: {e}"))
        })
    };
    // Get base book metadata
    let metadata_row = conn
      .query_row(
        "SELECT isbn, isbn13, publisher, language, page_count,
                publication_date, series_name, series_index, format,
                created_at, updated_at
         FROM book_metadata WHERE media_id = ?1",
        [&media_id_str],
        |row| {
          Ok((
            row.get::<_, Option<String>>(0)?,
            row.get::<_, Option<String>>(1)?,
            row.get::<_, Option<String>>(2)?,
            row.get::<_, Option<String>>(3)?,
            row.get::<_, Option<i32>>(4)?,
            row.get::<_, Option<String>>(5)?,
            row.get::<_, Option<String>>(6)?,
            row.get::<_, Option<f64>>(7)?,
            row.get::<_, Option<String>>(8)?,
            row.get::<_, String>(9)?,
            row.get::<_, String>(10)?,
          ))
        },
      )
      .optional()?;
    let Some((
      isbn,
      isbn13,
      publisher,
      language,
      page_count,
      publication_date,
      series_name,
      series_index,
      format,
      created_at,
      updated_at,
    )) = metadata_row
    else {
      return Ok::<_, PinakesError>(None);
    };
    // Get authors, in their stored display order.
    let mut stmt = conn.prepare(
      "SELECT author_name, author_sort, role, position
       FROM book_authors WHERE media_id = ?1 ORDER BY position",
    )?;
    let authors: Vec<crate::model::AuthorInfo> = stmt
      .query_map([&media_id_str], |row| {
        Ok(crate::model::AuthorInfo {
          name: row.get(0)?,
          file_as: row.get(1)?,
          role: row.get(2)?,
          position: row.get(3)?,
        })
      })?
      .collect::<rusqlite::Result<Vec<_>>>()?;
    // Get identifiers, grouped by type (a type may have several values).
    let mut stmt = conn.prepare(
      "SELECT identifier_type, identifier_value
       FROM book_identifiers WHERE media_id = ?1",
    )?;
    let mut identifiers: FxHashMap<String, Vec<String>> =
      FxHashMap::default();
    for row in stmt.query_map([&media_id_str], |row| {
      Ok((row.get::<_, String>(0)?, row.get::<_, String>(1)?))
    })? {
      let (id_type, value) = row?;
      identifiers.entry(id_type).or_default().push(value);
    }
    // Unparseable publication dates are tolerated and become None.
    let parsed_date = publication_date
      .and_then(|d| chrono::NaiveDate::parse_from_str(&d, "%Y-%m-%d").ok());
    Ok(Some(crate::model::BookMetadata {
      media_id,
      isbn,
      isbn13,
      publisher,
      language,
      page_count,
      publication_date: parsed_date,
      series_name,
      series_index,
      format,
      authors,
      identifiers,
      created_at: parse_db_datetime(&created_at)?,
      updated_at: parse_db_datetime(&updated_at)?,
    }))
  });
  Ok(
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_book_metadata timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_book_metadata: {e}"))
      })??,
  )
}
async fn add_book_author(
  &self,
  media_id: MediaId,
  author: &crate::model::AuthorInfo,
) -> Result<()> {
  let conn = Arc::clone(&self.conn);
  let media_id_str = media_id.to_string();
  let author = author.clone();
  let task = tokio::task::spawn_blocking(move || -> Result<()> {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // (media_id, author_name, role) is unique; re-adding the same author
    // refreshes the sort key and position instead of failing.
    conn.execute(
      "INSERT INTO book_authors (media_id, author_name, author_sort, role, \
       position)
       VALUES (?1, ?2, ?3, ?4, ?5)
       ON CONFLICT(media_id, author_name, role) DO UPDATE SET
           author_sort = ?3, position = ?5",
      rusqlite::params![
        media_id_str,
        author.name,
        author.file_as,
        author.role,
        author.position
      ],
    )?;
    Ok(())
  });
  tokio::time::timeout(std::time::Duration::from_secs(5), task)
    .await
    .map_err(|_| PinakesError::Database("add_book_author timed out".into()))?
    .map_err(|e| PinakesError::Database(format!("add_book_author: {e}")))??;
  Ok(())
}
async fn get_book_authors(
  &self,
  media_id: MediaId,
) -> Result<Vec<crate::model::AuthorInfo>> {
  let conn = Arc::clone(&self.conn);
  let media_id_str = media_id.to_string();
  let task = tokio::task::spawn_blocking(
    move || -> Result<Vec<crate::model::AuthorInfo>> {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      // Authors come back in their stored display order.
      let mut stmt = conn.prepare(
        "SELECT author_name, author_sort, role, position
         FROM book_authors WHERE media_id = ?1 ORDER BY position",
      )?;
      let rows = stmt.query_map([&media_id_str], |row| {
        Ok(crate::model::AuthorInfo {
          name: row.get(0)?,
          file_as: row.get(1)?,
          role: row.get(2)?,
          position: row.get(3)?,
        })
      })?;
      Ok(rows.collect::<rusqlite::Result<Vec<_>>>()?)
    },
  );
  tokio::time::timeout(std::time::Duration::from_secs(5), task)
    .await
    .map_err(|_| PinakesError::Database("get_book_authors timed out".into()))?
    .map_err(|e| PinakesError::Database(format!("get_book_authors: {e}")))?
}
async fn list_all_authors(
  &self,
  pagination: &Pagination,
) -> Result<Vec<(String, u64)>> {
  let conn = Arc::clone(&self.conn);
  let (limit, offset) = (pagination.limit, pagination.offset);
  let task =
    tokio::task::spawn_blocking(move || -> Result<Vec<(String, u64)>> {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      // Most-published authors first; ties broken alphabetically.
      let mut stmt = conn.prepare(
        "SELECT author_name, COUNT(DISTINCT media_id) as book_count
         FROM book_authors
         GROUP BY author_name
         ORDER BY book_count DESC, author_name
         LIMIT ?1 OFFSET ?2",
      )?;
      let rows = stmt
        .query_map([limit.cast_signed(), offset.cast_signed()], |row| {
          Ok((row.get(0)?, row.get::<_, i64>(1)?.cast_unsigned()))
        })?;
      Ok(rows.collect::<rusqlite::Result<Vec<_>>>()?)
    });
  tokio::time::timeout(std::time::Duration::from_secs(10), task)
    .await
    .map_err(|_| PinakesError::Database("list_all_authors timed out".into()))?
    .map_err(|e| PinakesError::Database(format!("list_all_authors: {e}")))?
}
async fn list_series(&self) -> Result<Vec<(String, u64)>> {
  let conn = Arc::clone(&self.conn);
  let task =
    tokio::task::spawn_blocking(move || -> Result<Vec<(String, u64)>> {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      // Standalone books (NULL series_name) are excluded from the listing.
      let mut stmt = conn.prepare(
        "SELECT series_name, COUNT(*) as book_count
         FROM book_metadata
         WHERE series_name IS NOT NULL
         GROUP BY series_name
         ORDER BY series_name",
      )?;
      let rows = stmt.query_map([], |row| {
        Ok((row.get(0)?, row.get::<_, i64>(1)?.cast_unsigned()))
      })?;
      Ok(rows.collect::<rusqlite::Result<Vec<_>>>()?)
    });
  tokio::time::timeout(std::time::Duration::from_secs(10), task)
    .await
    .map_err(|_| PinakesError::Database("list_series timed out".into()))?
    .map_err(|e| PinakesError::Database(format!("list_series: {e}")))?
}
// Fetch every media item belonging to the named series, ordered by series
// index and then title. The INNER JOIN means only items that actually have
// a book_metadata row are returned.
async fn get_series_books(
  &self,
  series_name: &str,
) -> Result<Vec<MediaItem>> {
  let conn = Arc::clone(&self.conn);
  let series = series_name.to_string();
  let fut = tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // Column order must match what `row_to_media_item` expects.
    let mut stmt = conn.prepare(
      "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash,
              m.file_size, m.title, m.artist, m.album, m.genre, \
       m.year,
              m.duration_secs, m.description, m.thumbnail_path, \
       m.file_mtime,
              m.created_at, m.updated_at
       FROM media_items m
       INNER JOIN book_metadata b ON m.id = b.media_id
       WHERE b.series_name = ?1
       ORDER BY b.series_index, m.title",
    )?;
    let items = stmt
      .query_map([&series], row_to_media_item)?
      .collect::<rusqlite::Result<Vec<_>>>()?;
    Ok::<_, PinakesError>(items)
  });
  Ok(
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_series_books timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_series_books: {e}"))
      })??,
  )
}
async fn update_reading_progress(
  &self,
  user_id: uuid::Uuid,
  media_id: MediaId,
  current_page: i32,
) -> Result<()> {
  // Books piggyback on watch_history: progress_secs holds the current page.
  let conn = Arc::clone(&self.conn);
  let user_id_str = user_id.to_string();
  let media_id_str = media_id.to_string();
  let task = tokio::task::spawn_blocking(move || -> Result<()> {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    conn.execute(
      "INSERT INTO watch_history (user_id, media_id, progress_secs, \
       last_watched)
       VALUES (?1, ?2, ?3, datetime('now'))
       ON CONFLICT(user_id, media_id) DO UPDATE SET
           progress_secs = ?3, last_watched = datetime('now')",
      rusqlite::params![user_id_str, media_id_str, f64::from(current_page)],
    )?;
    Ok(())
  });
  tokio::time::timeout(std::time::Duration::from_secs(5), task)
    .await
    .map_err(|_| {
      PinakesError::Database("update_reading_progress timed out".into())
    })?
    .map_err(|e| {
      PinakesError::Database(format!("update_reading_progress: {e}"))
    })??;
  Ok(())
}
/// Read a user's reading progress for one item. `Ok(None)` when the user
/// has no watch_history row for it.
async fn get_reading_progress(
  &self,
  user_id: uuid::Uuid,
  media_id: MediaId,
) -> Result<Option<crate::model::ReadingProgress>> {
  let conn = Arc::clone(&self.conn);
  let user_id_str = user_id.to_string();
  let media_id_str = media_id.to_string();
  let fut = tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // progress_secs doubles as "current page" for books (see
    // update_reading_progress); page_count is NULL when the item has no
    // book metadata, hence the LEFT JOIN.
    // NOTE(review): progress_secs is written as REAL (f64) by
    // update_reading_progress but read as i64 here -- this relies on the
    // column's integer affinity; confirm against the schema.
    let result = conn
      .query_row(
        "SELECT wh.progress_secs, bm.page_count, wh.last_watched
         FROM watch_history wh
         LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id
         WHERE wh.user_id = ?1 AND wh.media_id = ?2",
        [&user_id_str, &media_id_str],
        |row| {
          let current_page = row
            .get::<_, i64>(0)
            .map(|v| i32::try_from(v).unwrap_or(0))?;
          let total_pages = row.get::<_, Option<i32>>(1)?;
          let last_read_str = row.get::<_, String>(2)?;
          Ok((current_page, total_pages, last_read_str))
        },
      )
      .optional()?;
    let progress = match result {
      Some((current_page, total_pages, last_read_str)) => {
        // `update_reading_progress` writes last_watched via SQLite's
        // datetime('now') ("YYYY-MM-DD HH:MM:SS", UTC), which is not
        // RFC 3339 -- accept both formats instead of failing on every
        // row that method writes.
        let last_read_at =
          chrono::DateTime::parse_from_rfc3339(&last_read_str)
            .map(|dt| dt.with_timezone(&chrono::Utc))
            .or_else(|_| {
              chrono::NaiveDateTime::parse_from_str(
                &last_read_str,
                "%Y-%m-%d %H:%M:%S",
              )
              .map(|naive| naive.and_utc())
            })
            .map_err(|e| {
              PinakesError::Database(format!(
                "invalid datetime in database: {e}"
              ))
            })?;
        Some(crate::model::ReadingProgress {
          media_id,
          user_id,
          current_page,
          total_pages,
          // Clamp to 100% in case current_page overruns total_pages.
          progress_percent: total_pages.map_or(0.0, |total| {
            if total > 0 {
              (f64::from(current_page) / f64::from(total) * 100.0).min(100.0)
            } else {
              0.0
            }
          }),
          last_read_at,
        })
      },
      None => None,
    };
    Ok::<_, PinakesError>(progress)
  });
  Ok(
    tokio::time::timeout(std::time::Duration::from_secs(5), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_reading_progress timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_reading_progress: {e}"))
      })??,
  )
}
// Build the user's reading list from watch_history, deriving a reading
// status per item from page progress and optionally filtering on it.
async fn get_reading_list(
  &self,
  user_id: uuid::Uuid,
  status: Option<crate::model::ReadingStatus>,
) -> Result<Vec<MediaItem>> {
  let conn = Arc::clone(&self.conn);
  let user_id_str = user_id.to_string();
  let fut = tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // Query books with reading progress for this user
    // Join with book_metadata to get page counts and media_items for the
    // items
    let mut stmt = conn.prepare(
      "SELECT m.*, wh.progress_secs, bm.page_count
       FROM media_items m
       INNER JOIN watch_history wh ON m.id = wh.media_id
       LEFT JOIN book_metadata bm ON m.id = bm.media_id
       WHERE wh.user_id = ?1
       ORDER BY wh.last_watched DESC",
    )?;
    let rows = stmt.query_map([&user_id_str], |row| {
      // Parse the media item
      let item = row_to_media_item(row)?;
      // Read the extra columns by name, this is safe *regardless* of column
      // count.
      let current_page = row
        .get::<_, Option<i64>>("progress_secs")?
        .map_or(0, |v| i32::try_from(v).unwrap_or(0));
      let total_pages = row.get::<_, Option<i32>>("page_count")?;
      Ok((item, current_page, total_pages))
    })?;
    let mut results = Vec::new();
    for row in rows {
      match row {
        Ok((item, current_page, total_pages)) => {
          // Calculate status based on progress:
          //   >= 100% -> Completed, > 0% -> Reading, 0% -> ToRead.
          let calculated_status = total_pages.map_or(
            // No total pages known, assume reading
            crate::model::ReadingStatus::Reading,
            |total| {
              if total > 0 {
                let percent = (f64::from(current_page) / f64::from(total)
                  * 100.0)
                  .min(100.0);
                if percent >= 100.0 {
                  crate::model::ReadingStatus::Completed
                } else if percent > 0.0 {
                  crate::model::ReadingStatus::Reading
                } else {
                  crate::model::ReadingStatus::ToRead
                }
              } else {
                crate::model::ReadingStatus::Reading
              }
            },
          );
          // Filter by status if specified
          match status {
            None => results.push(item),
            Some(s) if s == calculated_status => results.push(item),
            _ => {},
          }
        },
        // NOTE(review): rows that fail to decode are silently dropped
        // from the list; consider logging these instead of skipping.
        Err(_) => continue,
      }
    }
    Ok::<_, PinakesError>(results)
  });
  Ok(
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| {
        PinakesError::Database("get_reading_list timed out".into())
      })?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("get_reading_list: {e}"))
      })??,
  )
}
// The trait method genuinely needs one filter per searchable field.
#[allow(clippy::too_many_arguments)]
// Search books by any combination of ISBN, author, series, publisher and
// language. Filters are ANDed together; all user values are bound through
// `?` placeholders, so the dynamically assembled SQL is injection-safe.
async fn search_books(
  &self,
  isbn: Option<&str>,
  author: Option<&str>,
  series: Option<&str>,
  publisher: Option<&str>,
  language: Option<&str>,
  pagination: &Pagination,
) -> Result<Vec<MediaItem>> {
  let conn = Arc::clone(&self.conn);
  let isbn = isbn.map(String::from);
  let author = author.map(String::from);
  let series = series.map(String::from);
  let publisher = publisher.map(String::from);
  let language = language.map(String::from);
  let offset = pagination.offset;
  let limit = pagination.limit;
  let fut = tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // DISTINCT guards against duplicate rows introduced by the optional
    // book_authors join below.
    let mut query = String::from(
      "SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, \
       m.content_hash,
              m.file_size, m.title, m.artist, m.album, m.genre, \
       m.year,
              m.duration_secs, m.description, m.thumbnail_path, \
       m.file_mtime,
              m.created_at, m.updated_at
       FROM media_items m
       INNER JOIN book_metadata bm ON m.id = bm.media_id",
    );
    // Conditions and their bound values are pushed in lockstep; the
    // placeholder order must match the params order exactly.
    let mut conditions = Vec::new();
    let mut params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
    if let Some(ref i) = isbn {
      // The same value is bound twice: once for isbn, once for isbn13.
      conditions.push("(bm.isbn = ? OR bm.isbn13 = ?)");
      params.push(Box::new(i.clone()));
      params.push(Box::new(i.clone()));
    }
    if let Some(ref a) = author {
      // The authors join is only added when an author filter is present.
      query.push_str(" INNER JOIN book_authors ba ON m.id = ba.media_id");
      conditions.push("ba.author_name LIKE ?");
      params.push(Box::new(format!("%{a}%")));
    }
    if let Some(ref s) = series {
      conditions.push("bm.series_name LIKE ?");
      params.push(Box::new(format!("%{s}%")));
    }
    if let Some(ref p) = publisher {
      conditions.push("bm.publisher LIKE ?");
      params.push(Box::new(format!("%{p}%")));
    }
    if let Some(ref l) = language {
      // Language is matched exactly, unlike the substring filters above.
      conditions.push("bm.language = ?");
      params.push(Box::new(l.clone()));
    }
    if !conditions.is_empty() {
      query.push_str(" WHERE ");
      query.push_str(&conditions.join(" AND "));
    }
    query.push_str(" ORDER BY m.title LIMIT ? OFFSET ?");
    params.push(Box::new(limit.cast_signed()));
    params.push(Box::new(offset.cast_signed()));
    let params_refs: Vec<&dyn rusqlite::ToSql> =
      params.iter().map(std::convert::AsRef::as_ref).collect();
    let mut stmt = conn.prepare(&query)?;
    let items = stmt
      .query_map(&*params_refs, row_to_media_item)?
      .collect::<rusqlite::Result<Vec<_>>>()?;
    Ok::<_, PinakesError>(items)
  });
  Ok(
    tokio::time::timeout(std::time::Duration::from_secs(10), fut)
      .await
      .map_err(|_| PinakesError::Database("search_books timed out".into()))?
      .map_err(|e: tokio::task::JoinError| {
        PinakesError::Database(format!("search_books: {e}"))
      })??,
  )
}
// Insert a media item that lives in managed storage (uploaded content
// addressed by storage_key), persisting the managed-specific columns
// (storage_mode, original_filename, uploaded_at, storage_key) alongside
// the common metadata.
// NOTE(review): unlike the session/book methods above, this call has no
// tokio::time::timeout wrapper -- confirm that is intentional.
async fn insert_managed_media(&self, item: &MediaItem) -> Result<()> {
  let conn = Arc::clone(&self.conn);
  let item = item.clone();
  tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // The 20 positional params below must stay in column-list order.
    conn.execute(
      "INSERT INTO media_items (id, path, file_name, media_type, \
       content_hash, file_size,
       title, artist, album, genre, year, duration_secs, \
       description, thumbnail_path,
       storage_mode, original_filename, uploaded_at, storage_key, \
       created_at, updated_at)
       VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, \
       ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20)",
      params![
        item.id.0.to_string(),
        item.path.to_string_lossy().to_string(),
        item.file_name,
        media_type_to_str(&item.media_type),
        item.content_hash.0,
        item.file_size.cast_signed(),
        item.title,
        item.artist,
        item.album,
        item.genre,
        item.year,
        item.duration_secs,
        item.description,
        item
          .thumbnail_path
          .as_ref()
          .map(|p| p.to_string_lossy().to_string()),
        item.storage_mode.to_string(),
        item.original_filename,
        item.uploaded_at.map(|dt| dt.to_rfc3339()),
        item.storage_key,
        item.created_at.to_rfc3339(),
        item.updated_at.to_rfc3339(),
      ],
    )?;
    Ok::<_, PinakesError>(())
  })
  .await
  .map_err(|e| {
    PinakesError::Database(format!("insert_managed_media: {e}"))
  })??;
  Ok(())
}
/// Fetch the managed blob for `hash`, creating it with a reference count
/// of 1 if it does not exist yet.
async fn get_or_create_blob(
  &self,
  hash: &ContentHash,
  size: u64,
  mime_type: &str,
) -> Result<ManagedBlob> {
  let conn = Arc::clone(&self.conn);
  let hash_str = hash.0.clone();
  let mime = mime_type.to_string();
  // Capture a single instant so the `stored_at` written to the database
  // and the one returned to the caller are identical (previously a second
  // `Utc::now()` was taken inside the blocking task, which could drift
  // from the persisted value).
  let now = chrono::Utc::now();
  let now_str = now.to_rfc3339();
  tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // Return the existing blob unchanged if it is already stored.
    // NOTE(review): this path does NOT bump reference_count -- callers
    // appear to use increment_blob_ref for that; confirm.
    let existing = conn
      .query_row(
        "SELECT content_hash, file_size, mime_type, reference_count, \
         stored_at, last_verified
         FROM managed_blobs WHERE content_hash = ?1",
        params![&hash_str],
        |row| {
          Ok(ManagedBlob {
            content_hash: ContentHash(row.get::<_, String>(0)?),
            file_size: row.get::<_, i64>(1)?.cast_unsigned(),
            mime_type: row.get(2)?,
            reference_count: row.get::<_, i32>(3)?.cast_unsigned(),
            stored_at: parse_datetime(&row.get::<_, String>(4)?),
            last_verified: row
              .get::<_, Option<String>>(5)?
              .map(|s| parse_datetime(&s)),
          })
        },
      )
      .optional()?;
    if let Some(blob) = existing {
      return Ok(blob);
    }
    // First sighting of this content: store it with a refcount of one.
    conn.execute(
      "INSERT INTO managed_blobs (content_hash, file_size, mime_type, \
       reference_count, stored_at)
       VALUES (?1, ?2, ?3, 1, ?4)",
      params![&hash_str, size.cast_signed(), &mime, &now_str],
    )?;
    Ok(ManagedBlob {
      content_hash: ContentHash(hash_str),
      file_size: size,
      mime_type: mime,
      reference_count: 1,
      stored_at: now,
      last_verified: None,
    })
  })
  .await
  .map_err(|e| PinakesError::Database(format!("get_or_create_blob: {e}")))?
}
async fn get_blob(&self, hash: &ContentHash) -> Result<Option<ManagedBlob>> {
  let conn = Arc::clone(&self.conn);
  let hash_str = hash.0.clone();
  tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    let lookup = conn.query_row(
      "SELECT content_hash, file_size, mime_type, reference_count, \
       stored_at, last_verified
       FROM managed_blobs WHERE content_hash = ?1",
      params![&hash_str],
      |row| {
        let last_verified = row
          .get::<_, Option<String>>(5)?
          .map(|s| parse_datetime(&s));
        Ok(ManagedBlob {
          content_hash: ContentHash(row.get::<_, String>(0)?),
          file_size: row.get::<_, i64>(1)?.cast_unsigned(),
          mime_type: row.get(2)?,
          reference_count: row.get::<_, i32>(3)?.cast_unsigned(),
          stored_at: parse_datetime(&row.get::<_, String>(4)?),
          last_verified,
        })
      },
    );
    // Absent rows become Ok(None); real failures become database errors.
    lookup
      .optional()
      .map_err(|e| PinakesError::Database(format!("get_blob query: {e}")))
  })
  .await
  .map_err(|e| PinakesError::Database(format!("get_blob: {e}")))?
}
async fn increment_blob_ref(&self, hash: &ContentHash) -> Result<()> {
  let conn = Arc::clone(&self.conn);
  let hash_str = hash.0.clone();
  tokio::task::spawn_blocking(move || -> Result<()> {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    // An unknown hash matches no rows; that is silently a no-op here.
    conn.execute(
      "UPDATE managed_blobs SET reference_count = reference_count + 1 WHERE \
       content_hash = ?1",
      params![&hash_str],
    )?;
    Ok(())
  })
  .await
  .map_err(|e| {
    PinakesError::Database(format!("increment_blob_ref: {e}"))
  })??;
  Ok(())
}
// Decrement a blob's reference count and report whether it is now
// orphaned (count <= 0). The UPDATE and the read-back run under the same
// connection lock, so no other task can interleave between them.
// NOTE(review): the decrement has no floor, so repeated calls can drive
// the count negative; `list_orphaned_blobs` treats any count <= 0 as
// orphaned, but confirm negative counts are acceptable. An unknown hash
// errors out at the read-back (no row), wrapped as
// "decrement_blob_ref read".
async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result<bool> {
  let conn = Arc::clone(&self.conn);
  let hash_str = hash.0.clone();
  tokio::task::spawn_blocking(move || {
    let conn = conn.lock().map_err(|e| {
      PinakesError::Database(format!("connection mutex poisoned: {e}"))
    })?;
    conn.execute(
      "UPDATE managed_blobs SET reference_count = reference_count - 1 WHERE \
       content_hash = ?1",
      params![&hash_str],
    )?;
    // Check if reference count is now 0
    let count: i32 = conn
      .query_row(
        "SELECT reference_count FROM managed_blobs WHERE content_hash = ?1",
        params![&hash_str],
        |row| row.get(0),
      )
      .map_err(|e| {
        PinakesError::Database(format!("decrement_blob_ref read: {e}"))
      })?;
    Ok::<_, PinakesError>(count <= 0)
  })
  .await
  // First layer unwraps the join error; the second re-wraps any inner
  // database error with the "decrement_blob_ref query" prefix.
  .map_err(|e| PinakesError::Database(format!("decrement_blob_ref: {e}")))?
  .map_err(|e| {
    PinakesError::Database(format!("decrement_blob_ref query: {e}"))
  })
}
async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()> {
let conn = Arc::clone(&self.conn);
let hash_str = hash.0.clone();
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE managed_blobs SET last_verified = ?1 WHERE content_hash = ?2",
params![&now, &hash_str],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("update_blob_verified: {e}"))
})??;
Ok(())
}
  /// List all managed blobs whose reference count is zero or below, i.e.
  /// candidates for physical deletion by the caller.
  async fn list_orphaned_blobs(&self) -> Result<Vec<ManagedBlob>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT content_hash, file_size, mime_type, reference_count, \
         stored_at, last_verified
         FROM managed_blobs WHERE reference_count <= 0",
      )?;
      let blobs = stmt
        .query_map([], |row| {
          Ok(ManagedBlob {
            content_hash: ContentHash(row.get::<_, String>(0)?),
            // Signed in SQLite; convert back to the unsigned model types.
            file_size: row.get::<_, i64>(1)?.cast_unsigned(),
            mime_type: row.get(2)?,
            reference_count: row.get::<_, i32>(3)?.cast_unsigned(),
            stored_at: parse_datetime(&row.get::<_, String>(4)?),
            last_verified: row
              .get::<_, Option<String>>(5)?
              .map(|s| parse_datetime(&s)),
          })
        })?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(blobs)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("list_orphaned_blobs: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("list_orphaned_blobs query: {e}"))
    })
  }
async fn delete_blob(&self, hash: &ContentHash) -> Result<()> {
let conn = Arc::clone(&self.conn);
let hash_str = hash.0.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"DELETE FROM managed_blobs WHERE content_hash = ?1",
params![&hash_str],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| PinakesError::Database(format!("delete_blob: {e}")))??;
Ok(())
}
  /// Aggregate statistics over the managed blob store: counts, byte totals,
  /// deduplication ratio, and orphan count.
  async fn managed_storage_stats(&self) -> Result<ManagedStorageStats> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let total_blobs: u64 = conn
        .query_row("SELECT COUNT(*) FROM managed_blobs", [], |row| {
          row.get::<_, i64>(0)
        })?
        .cast_unsigned();
      let total_size: u64 = conn
        .query_row(
          "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs",
          [],
          |row| row.get::<_, i64>(0),
        )?
        .cast_unsigned();
      // Bytes held by blobs referenced exactly once (not deduplicated).
      let unique_size: u64 = conn
        .query_row(
          "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE \
           reference_count = 1",
          [],
          |row| row.get::<_, i64>(0),
        )?
        .cast_unsigned();
      let managed_media_count: u64 = conn
        .query_row(
          "SELECT COUNT(*) FROM media_items WHERE storage_mode = 'managed'",
          [],
          |row| row.get::<_, i64>(0),
        )?
        .cast_unsigned();
      let orphaned_blobs: u64 = conn
        .query_row(
          "SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0",
          [],
          |row| row.get::<_, i64>(0),
        )?
        .cast_unsigned();
      // Defaults to 1.0 (no dedup savings measurable) for an empty store.
      let dedup_ratio = if total_size > 0 {
        // Compute ratio via fixed-point arithmetic to avoid u64->f64 precision
        // loss. Uses u128 intermediate to avoid overflow.
        let ratio_fixed =
          u128::from(unique_size) * (1u128 << 20) / u128::from(total_size);
        f64::from(u32::try_from(ratio_fixed).unwrap_or(u32::MAX))
          / f64::from(1u32 << 20)
      } else {
        1.0
      };
      Ok::<_, PinakesError>(ManagedStorageStats {
        total_blobs,
        total_size_bytes: total_size,
        unique_size_bytes: unique_size,
        deduplication_ratio: dedup_ratio,
        managed_media_count,
        orphaned_blobs,
      })
    })
    .await
    .map_err(|e| PinakesError::Database(format!("managed_storage_stats: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("managed_storage_stats query: {e}"))
    })
  }
  /// Insert a new sync device row, storing only the hash of its auth token.
  ///
  /// Returns the device back to the caller on success.
  async fn register_device(
    &self,
    device: &crate::sync::SyncDevice,
    token_hash: &str,
  ) -> Result<crate::sync::SyncDevice> {
    let conn = Arc::clone(&self.conn);
    let device = device.clone();
    let token_hash = token_hash.to_string();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "INSERT INTO sync_devices (id, user_id, name, device_type, \
         client_version, os_info,
                device_token_hash, last_seen_at, sync_cursor, enabled, \
         created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)",
        params![
          device.id.0.to_string(),
          device.user_id.0.to_string(),
          device.name,
          device.device_type.to_string(),
          device.client_version,
          device.os_info,
          token_hash,
          device.last_seen_at.to_rfc3339(),
          device.sync_cursor,
          device.enabled,
          device.created_at.to_rfc3339(),
          device.updated_at.to_rfc3339(),
        ],
      )?;
      Ok::<_, PinakesError>(device)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("register_device: {e}")))?
    .map_err(|e| PinakesError::Database(format!("register_device query: {e}")))
  }
  /// Fetch a sync device by id; errors (rather than `None`) when missing.
  async fn get_device(
    &self,
    id: crate::sync::DeviceId,
  ) -> Result<crate::sync::SyncDevice> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn
        .query_row(
          "SELECT id, user_id, name, device_type, client_version, os_info,
                  last_sync_at, last_seen_at, sync_cursor, enabled, \
           created_at, updated_at
           FROM sync_devices WHERE id = ?1",
          params![id.0.to_string()],
          |row| {
            Ok(crate::sync::SyncDevice {
              id: crate::sync::DeviceId(parse_uuid(
                &row.get::<_, String>(0)?,
              )?),
              user_id: crate::users::UserId(parse_uuid(
                &row.get::<_, String>(1)?,
              )?),
              name: row.get(2)?,
              // Unknown device-type strings fall back to the default
              // variant instead of failing the whole row.
              device_type: row
                .get::<_, String>(3)?
                .parse()
                .unwrap_or_default(),
              client_version: row.get(4)?,
              os_info: row.get(5)?,
              last_sync_at: row
                .get::<_, Option<String>>(6)?
                .map(|s| parse_datetime(&s)),
              last_seen_at: parse_datetime(&row.get::<_, String>(7)?),
              sync_cursor: row.get(8)?,
              enabled: row.get(9)?,
              created_at: parse_datetime(&row.get::<_, String>(10)?),
              updated_at: parse_datetime(&row.get::<_, String>(11)?),
            })
          },
        )
        .map_err(|e| PinakesError::Database(format!("get_device query: {e}")))
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_device: {e}")))?
  }
  /// Authenticate-by-token lookup: find the device whose stored
  /// `device_token_hash` matches. Returns `Ok(None)` when no device matches.
  async fn get_device_by_token(
    &self,
    token_hash: &str,
  ) -> Result<Option<crate::sync::SyncDevice>> {
    let conn = Arc::clone(&self.conn);
    let token_hash = token_hash.to_string();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn
        .query_row(
          "SELECT id, user_id, name, device_type, client_version, os_info,
                  last_sync_at, last_seen_at, sync_cursor, enabled, \
           created_at, updated_at
           FROM sync_devices WHERE device_token_hash = ?1",
          params![&token_hash],
          |row| {
            Ok(crate::sync::SyncDevice {
              id: crate::sync::DeviceId(parse_uuid(
                &row.get::<_, String>(0)?,
              )?),
              user_id: crate::users::UserId(parse_uuid(
                &row.get::<_, String>(1)?,
              )?),
              name: row.get(2)?,
              // Tolerate unknown device-type strings via the default.
              device_type: row
                .get::<_, String>(3)?
                .parse()
                .unwrap_or_default(),
              client_version: row.get(4)?,
              os_info: row.get(5)?,
              last_sync_at: row
                .get::<_, Option<String>>(6)?
                .map(|s| parse_datetime(&s)),
              last_seen_at: parse_datetime(&row.get::<_, String>(7)?),
              sync_cursor: row.get(8)?,
              enabled: row.get(9)?,
              created_at: parse_datetime(&row.get::<_, String>(10)?),
              updated_at: parse_datetime(&row.get::<_, String>(11)?),
            })
          },
        )
        .optional()
        .map_err(|e| {
          PinakesError::Database(format!("get_device_by_token query: {e}"))
        })
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_device_by_token: {e}")))?
  }
  /// List every sync device registered to `user_id`, most recently seen
  /// first.
  async fn list_user_devices(
    &self,
    user_id: crate::users::UserId,
  ) -> Result<Vec<crate::sync::SyncDevice>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT id, user_id, name, device_type, client_version, os_info,
                last_sync_at, last_seen_at, sync_cursor, enabled, \
         created_at, updated_at
         FROM sync_devices WHERE user_id = ?1 ORDER BY last_seen_at \
         DESC",
      )?;
      let devices = stmt
        .query_map(params![user_id.0.to_string()], |row| {
          Ok(crate::sync::SyncDevice {
            id: crate::sync::DeviceId(parse_uuid(
              &row.get::<_, String>(0)?,
            )?),
            user_id: crate::users::UserId(parse_uuid(
              &row.get::<_, String>(1)?,
            )?),
            name: row.get(2)?,
            // Unknown device-type strings degrade to the default variant.
            device_type: row
              .get::<_, String>(3)?
              .parse()
              .unwrap_or_default(),
            client_version: row.get(4)?,
            os_info: row.get(5)?,
            last_sync_at: row
              .get::<_, Option<String>>(6)?
              .map(|s| parse_datetime(&s)),
            last_seen_at: parse_datetime(&row.get::<_, String>(7)?),
            sync_cursor: row.get(8)?,
            enabled: row.get(9)?,
            created_at: parse_datetime(&row.get::<_, String>(10)?),
            updated_at: parse_datetime(&row.get::<_, String>(11)?),
          })
        })?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(devices)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("list_user_devices: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("list_user_devices query: {e}"))
    })
  }
  /// Persist the mutable fields of a sync device.
  ///
  /// Note: `device_token_hash` and `created_at` are deliberately absent
  /// from the UPDATE, so they are never overwritten here.
  async fn update_device(
    &self,
    device: &crate::sync::SyncDevice,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let device = device.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "UPDATE sync_devices SET name = ?1, device_type = ?2, client_version \
         = ?3,
                os_info = ?4, last_sync_at = ?5, last_seen_at = ?6, \
         sync_cursor = ?7,
                enabled = ?8, updated_at = ?9 WHERE id = ?10",
        params![
          device.name,
          device.device_type.to_string(),
          device.client_version,
          device.os_info,
          device.last_sync_at.map(|dt| dt.to_rfc3339()),
          device.last_seen_at.to_rfc3339(),
          device.sync_cursor,
          device.enabled,
          device.updated_at.to_rfc3339(),
          device.id.0.to_string(),
        ],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| PinakesError::Database(format!("update_device: {e}")))??;
    Ok(())
  }
async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute("DELETE FROM sync_devices WHERE id = ?1", params![
id.0.to_string()
])?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| PinakesError::Database(format!("delete_device: {e}")))??;
Ok(())
}
async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()> {
let conn = Arc::clone(&self.conn);
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE sync_devices SET last_seen_at = ?1, updated_at = ?1 WHERE id \
= ?2",
params![&now, id.0.to_string()],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| PinakesError::Database(format!("touch_device: {e}")))??;
Ok(())
}
  /// Append a change to the sync log, assigning it the next value from the
  /// global `sync_sequence` counter.
  ///
  /// The increment uses `UPDATE … RETURNING` (SQLite >= 3.35). The counter
  /// bump and the insert are two statements; both run under the same
  /// connection mutex guard, so no other task can interleave between them.
  async fn record_sync_change(
    &self,
    change: &crate::sync::SyncLogEntry,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let change = change.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      // Get and increment sequence
      let seq: i64 = conn.query_row(
        "UPDATE sync_sequence SET current_value = current_value + 1 WHERE id \
         = 1 RETURNING current_value",
        [],
        |row| row.get(0),
      )?;
      conn.execute(
        "INSERT INTO sync_log (id, sequence, change_type, media_id, path, \
         content_hash,
                file_size, metadata_json, changed_by_device, timestamp)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
        params![
          change.id.to_string(),
          seq,
          change.change_type.to_string(),
          change.media_id.map(|m| m.0.to_string()),
          change.path,
          change.content_hash.as_ref().map(|h| h.0.clone()),
          // Stored signed; read back with cast_unsigned.
          change.file_size.map(u64::cast_signed),
          change.metadata_json,
          change.changed_by_device.map(|d| d.0.to_string()),
          change.timestamp.to_rfc3339(),
        ],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("record_sync_change: {e}"))
    })??;
    Ok(())
  }
  /// Return up to `limit` sync-log entries with sequence strictly greater
  /// than `cursor`, in ascending sequence order.
  async fn get_changes_since(
    &self,
    cursor: i64,
    limit: u64,
  ) -> Result<Vec<crate::sync::SyncLogEntry>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT id, sequence, change_type, media_id, path, content_hash,
                file_size, metadata_json, changed_by_device, timestamp
         FROM sync_log WHERE sequence > ?1 ORDER BY sequence LIMIT ?2",
      )?;
      let entries = stmt
        .query_map(params![cursor, limit.cast_signed()], |row| {
          Ok(crate::sync::SyncLogEntry {
            id: parse_uuid(&row.get::<_, String>(0)?)?,
            sequence: row.get(1)?,
            // Unknown change-type strings degrade to `Modified` rather
            // than failing the whole page of results.
            change_type: row
              .get::<_, String>(2)?
              .parse()
              .unwrap_or(crate::sync::SyncChangeType::Modified),
            // Malformed UUIDs in optional columns are dropped to None.
            media_id: row
              .get::<_, Option<String>>(3)?
              .and_then(|s| Uuid::parse_str(&s).ok().map(MediaId)),
            path: row.get(4)?,
            content_hash: row
              .get::<_, Option<String>>(5)?
              .map(ContentHash),
            file_size: row
              .get::<_, Option<i64>>(6)?
              .map(i64::cast_unsigned),
            metadata_json: row.get(7)?,
            changed_by_device: row.get::<_, Option<String>>(8)?.and_then(|s| {
              Uuid::parse_str(&s).ok().map(crate::sync::DeviceId)
            }),
            timestamp: parse_datetime(&row.get::<_, String>(9)?),
          })
        })?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(entries)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_changes_since: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("get_changes_since query: {e}"))
    })
  }
async fn get_current_sync_cursor(&self) -> Result<i64> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.query_row(
"SELECT current_value FROM sync_sequence WHERE id = 1",
[],
|row| row.get(0),
)
.map_err(|e| {
PinakesError::Database(format!("get_current_sync_cursor query: {e}"))
})
})
.await
.map_err(|e| {
PinakesError::Database(format!("get_current_sync_cursor: {e}"))
})?
}
async fn cleanup_old_sync_log(&self, before: DateTime<Utc>) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let before_str = before.to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.execute("DELETE FROM sync_log WHERE timestamp < ?1", params![
&before_str
])
.map(|n| n as u64)
.map_err(|e| {
PinakesError::Database(format!("cleanup_old_sync_log query: {e}"))
})
})
.await
.map_err(|e| PinakesError::Database(format!("cleanup_old_sync_log: {e}")))?
}
  /// Fetch the per-device sync state for a single path; `Ok(None)` when the
  /// device has no recorded state for that path.
  async fn get_device_sync_state(
    &self,
    device_id: crate::sync::DeviceId,
    path: &str,
  ) -> Result<Option<crate::sync::DeviceSyncState>> {
    let conn = Arc::clone(&self.conn);
    let path = path.to_string();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn
        .query_row(
          "SELECT device_id, path, local_hash, server_hash, local_mtime, \
           server_mtime,
                  sync_status, last_synced_at, conflict_info_json
           FROM device_sync_state WHERE device_id = ?1 AND path = ?2",
          params![device_id.0.to_string(), &path],
          |row| {
            Ok(crate::sync::DeviceSyncState {
              device_id: crate::sync::DeviceId(parse_uuid(
                &row.get::<_, String>(0)?,
              )?),
              path: row.get(1)?,
              local_hash: row.get(2)?,
              server_hash: row.get(3)?,
              local_mtime: row.get(4)?,
              server_mtime: row.get(5)?,
              // Unknown status strings degrade to `Synced`.
              sync_status: row
                .get::<_, String>(6)?
                .parse()
                .unwrap_or(crate::sync::FileSyncStatus::Synced),
              last_synced_at: row
                .get::<_, Option<String>>(7)?
                .map(|s| parse_datetime(&s)),
              conflict_info_json: row.get(8)?,
            })
          },
        )
        .optional()
        .map_err(|e| {
          PinakesError::Database(format!("get_device_sync_state query: {e}"))
        })
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("get_device_sync_state: {e}"))
    })?
  }
  /// Insert or update the sync state for a (device, path) pair.
  ///
  /// Relies on `ON CONFLICT(device_id, path)` to overwrite every mutable
  /// column of an existing row with the incoming values.
  async fn upsert_device_sync_state(
    &self,
    state: &crate::sync::DeviceSyncState,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let state = state.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "INSERT INTO device_sync_state (device_id, path, local_hash, \
         server_hash,
                local_mtime, server_mtime, sync_status, last_synced_at, \
         conflict_info_json)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
         ON CONFLICT(device_id, path) DO UPDATE SET
           local_hash = excluded.local_hash,
           server_hash = excluded.server_hash,
           local_mtime = excluded.local_mtime,
           server_mtime = excluded.server_mtime,
           sync_status = excluded.sync_status,
           last_synced_at = excluded.last_synced_at,
           conflict_info_json = excluded.conflict_info_json",
        params![
          state.device_id.0.to_string(),
          state.path,
          state.local_hash,
          state.server_hash,
          state.local_mtime,
          state.server_mtime,
          state.sync_status.to_string(),
          state.last_synced_at.map(|dt| dt.to_rfc3339()),
          state.conflict_info_json,
        ],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("upsert_device_sync_state: {e}"))
    })??;
    Ok(())
  }
  /// List all sync-state rows for a device that still need work: pending
  /// upload, pending download, or in conflict.
  async fn list_pending_sync(
    &self,
    device_id: crate::sync::DeviceId,
  ) -> Result<Vec<crate::sync::DeviceSyncState>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT device_id, path, local_hash, server_hash, local_mtime, \
         server_mtime,
                sync_status, last_synced_at, conflict_info_json
         FROM device_sync_state
         WHERE device_id = ?1 AND sync_status IN ('pending_upload', \
         'pending_download', 'conflict')",
      )?;
      let states = stmt
        .query_map(params![device_id.0.to_string()], |row| {
          Ok(crate::sync::DeviceSyncState {
            device_id: crate::sync::DeviceId(parse_uuid(
              &row.get::<_, String>(0)?,
            )?),
            path: row.get(1)?,
            local_hash: row.get(2)?,
            server_hash: row.get(3)?,
            local_mtime: row.get(4)?,
            server_mtime: row.get(5)?,
            // Unknown status strings degrade to `Synced`.
            sync_status: row
              .get::<_, String>(6)?
              .parse()
              .unwrap_or(crate::sync::FileSyncStatus::Synced),
            last_synced_at: row
              .get::<_, Option<String>>(7)?
              .map(|s| parse_datetime(&s)),
            conflict_info_json: row.get(8)?,
          })
        })?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(states)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("list_pending_sync: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("list_pending_sync query: {e}"))
    })
  }
  /// Persist a new chunked-upload session row.
  async fn create_upload_session(
    &self,
    session: &crate::sync::UploadSession,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let session = session.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "INSERT INTO upload_sessions (id, device_id, target_path, \
         expected_hash,
                expected_size, chunk_size, chunk_count, status, \
         created_at, expires_at, last_activity)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)",
        params![
          session.id.to_string(),
          session.device_id.0.to_string(),
          session.target_path,
          session.expected_hash.0,
          // Unsigned sizes/counts are stored signed in SQLite.
          session.expected_size.cast_signed(),
          session.chunk_size.cast_signed(),
          session.chunk_count.cast_signed(),
          session.status.to_string(),
          session.created_at.to_rfc3339(),
          session.expires_at.to_rfc3339(),
          session.last_activity.to_rfc3339(),
        ],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("create_upload_session: {e}"))
    })??;
    Ok(())
  }
  /// Fetch an upload session by id; errors (rather than `None`) when
  /// missing.
  async fn get_upload_session(
    &self,
    id: Uuid,
  ) -> Result<crate::sync::UploadSession> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn
        .query_row(
          "SELECT id, device_id, target_path, expected_hash, expected_size, \
           chunk_size,
                  chunk_count, status, created_at, expires_at, \
           last_activity
           FROM upload_sessions WHERE id = ?1",
          params![id.to_string()],
          |row| {
            Ok(crate::sync::UploadSession {
              id: parse_uuid(&row.get::<_, String>(0)?)?,
              device_id: crate::sync::DeviceId(parse_uuid(
                &row.get::<_, String>(1)?,
              )?),
              target_path: row.get(2)?,
              expected_hash: ContentHash(row.get(3)?),
              expected_size: row.get::<_, i64>(4)?.cast_unsigned(),
              chunk_size: row.get::<_, i64>(5)?.cast_unsigned(),
              chunk_count: row.get::<_, i64>(6)?.cast_unsigned(),
              // Unknown status strings degrade to `Pending`.
              status: row
                .get::<_, String>(7)?
                .parse()
                .unwrap_or(crate::sync::UploadStatus::Pending),
              created_at: parse_datetime(&row.get::<_, String>(8)?),
              expires_at: parse_datetime(&row.get::<_, String>(9)?),
              last_activity: parse_datetime(&row.get::<_, String>(10)?),
            })
          },
        )
        .map_err(|e| {
          PinakesError::Database(format!("get_upload_session query: {e}"))
        })
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_upload_session: {e}")))?
  }
async fn update_upload_session(
&self,
session: &crate::sync::UploadSession,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let session = session.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE upload_sessions SET status = ?1, last_activity = ?2 WHERE id \
= ?3",
params![
session.status.to_string(),
session.last_activity.to_rfc3339(),
session.id.to_string(),
],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("update_upload_session: {e}"))
})??;
Ok(())
}
  /// Record receipt of one chunk of an upload.
  ///
  /// Re-sending a chunk is safe: `ON CONFLICT(upload_id, chunk_index)`
  /// overwrites the previous record for that index.
  async fn record_chunk(
    &self,
    upload_id: Uuid,
    chunk: &crate::sync::ChunkInfo,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let chunk = chunk.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "INSERT INTO upload_chunks (upload_id, chunk_index, offset, size, \
         hash, received_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6)
         ON CONFLICT(upload_id, chunk_index) DO UPDATE SET
           offset = excluded.offset, size = excluded.size,
           hash = excluded.hash, received_at = excluded.received_at",
        params![
          upload_id.to_string(),
          chunk.chunk_index.cast_signed(),
          chunk.offset.cast_signed(),
          chunk.size.cast_signed(),
          chunk.hash,
          chunk.received_at.to_rfc3339(),
        ],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| PinakesError::Database(format!("record_chunk: {e}")))??;
    Ok(())
  }
  /// List all recorded chunks for an upload session, ordered by chunk
  /// index.
  async fn get_upload_chunks(
    &self,
    upload_id: Uuid,
  ) -> Result<Vec<crate::sync::ChunkInfo>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT upload_id, chunk_index, offset, size, hash, received_at
         FROM upload_chunks WHERE upload_id = ?1 ORDER BY chunk_index",
      )?;
      let chunks = stmt
        .query_map(params![upload_id.to_string()], |row| {
          Ok(crate::sync::ChunkInfo {
            upload_id: parse_uuid(&row.get::<_, String>(0)?)?,
            // Indices/sizes are stored signed; convert back to unsigned.
            chunk_index: row.get::<_, i64>(1)?.cast_unsigned(),
            offset: row.get::<_, i64>(2)?.cast_unsigned(),
            size: row.get::<_, i64>(3)?.cast_unsigned(),
            hash: row.get(4)?,
            received_at: parse_datetime(&row.get::<_, String>(5)?),
          })
        })?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(chunks)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_upload_chunks: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("get_upload_chunks query: {e}"))
    })
  }
async fn cleanup_expired_uploads(&self) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.execute(
"DELETE FROM upload_sessions WHERE expires_at < ?1",
params![&now],
)
.map(|n| n as u64)
.map_err(|e| {
PinakesError::Database(format!("cleanup_expired_uploads query: {e}"))
})
})
.await
.map_err(|e| {
PinakesError::Database(format!("cleanup_expired_uploads: {e}"))
})?
}
  /// Persist a newly detected sync conflict (unresolved: no `resolved_at`
  /// or `resolution` columns are written here).
  async fn record_conflict(
    &self,
    conflict: &crate::sync::SyncConflict,
  ) -> Result<()> {
    let conn = Arc::clone(&self.conn);
    let conflict = conflict.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "INSERT INTO sync_conflicts (id, device_id, path, local_hash, \
         local_mtime,
                server_hash, server_mtime, detected_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
        params![
          conflict.id.to_string(),
          conflict.device_id.0.to_string(),
          conflict.path,
          conflict.local_hash,
          conflict.local_mtime,
          conflict.server_hash,
          conflict.server_mtime,
          conflict.detected_at.to_rfc3339(),
        ],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| PinakesError::Database(format!("record_conflict: {e}")))??;
    Ok(())
  }
  /// List all conflicts for a device that have not been resolved yet
  /// (`resolved_at IS NULL`).
  async fn get_unresolved_conflicts(
    &self,
    device_id: crate::sync::DeviceId,
  ) -> Result<Vec<crate::sync::SyncConflict>> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT id, device_id, path, local_hash, local_mtime, server_hash, \
         server_mtime,
                detected_at, resolved_at, resolution
         FROM sync_conflicts WHERE device_id = ?1 AND resolved_at IS \
         NULL",
      )?;
      let conflicts = stmt
        .query_map(params![device_id.0.to_string()], |row| {
          Ok(crate::sync::SyncConflict {
            id: parse_uuid(&row.get::<_, String>(0)?)?,
            device_id: crate::sync::DeviceId(parse_uuid(
              &row.get::<_, String>(1)?,
            )?),
            path: row.get(2)?,
            local_hash: row.get(3)?,
            local_mtime: row.get(4)?,
            server_hash: row.get(5)?,
            server_mtime: row.get(6)?,
            detected_at: parse_datetime(&row.get::<_, String>(7)?),
            resolved_at: row
              .get::<_, Option<String>>(8)?
              .map(|s| parse_datetime(&s)),
            // Inverse of the mapping used by `resolve_conflict`; any
            // unrecognized string is treated as "no resolution".
            resolution: row.get::<_, Option<String>>(9)?.and_then(|s| {
              match s.as_str() {
                "server_wins" => {
                  Some(crate::config::ConflictResolution::ServerWins)
                },
                "client_wins" => {
                  Some(crate::config::ConflictResolution::ClientWins)
                },
                "keep_both" => {
                  Some(crate::config::ConflictResolution::KeepBoth)
                },
                "manual" => Some(crate::config::ConflictResolution::Manual),
                _ => None,
              }
            }),
          })
        })?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(conflicts)
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("get_unresolved_conflicts: {e}"))
    })?
    .map_err(|e| {
      PinakesError::Database(format!("get_unresolved_conflicts query: {e}"))
    })
  }
async fn resolve_conflict(
&self,
id: Uuid,
resolution: crate::config::ConflictResolution,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let now = chrono::Utc::now().to_rfc3339();
let resolution_str = match resolution {
crate::config::ConflictResolution::ServerWins => "server_wins",
crate::config::ConflictResolution::ClientWins => "client_wins",
crate::config::ConflictResolution::KeepBoth => "keep_both",
crate::config::ConflictResolution::Manual => "manual",
};
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE sync_conflicts SET resolved_at = ?1, resolution = ?2 WHERE id \
= ?3",
params![&now, resolution_str, id.to_string()],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| PinakesError::Database(format!("resolve_conflict: {e}")))??;
Ok(())
}
  /// Insert a new share row, flattening the `ShareRecipient` enum into the
  /// (recipient_type, recipient_user_id, public_token, password_hash)
  /// columns. Returns the share back to the caller on success.
  async fn create_share(
    &self,
    share: &crate::sharing::Share,
  ) -> Result<crate::sharing::Share> {
    let conn = Arc::clone(&self.conn);
    let share = share.clone();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      // Flatten the recipient variant into its column representation.
      // Group/Federated currently persist only the type tag; their
      // payloads are not stored in these columns.
      let (recipient_type, recipient_user_id, public_token, password_hash) =
        match &share.recipient {
          crate::sharing::ShareRecipient::PublicLink {
            token,
            password_hash,
          } => {
            (
              "public_link",
              None,
              Some(token.clone()),
              password_hash.clone(),
            )
          },
          crate::sharing::ShareRecipient::User { user_id } => {
            ("user", Some(user_id.0.to_string()), None, None)
          },
          crate::sharing::ShareRecipient::Group { .. } => {
            ("group", None, None, None)
          },
          crate::sharing::ShareRecipient::Federated { .. } => {
            ("federated", None, None, None)
          },
        };
      conn.execute(
        "INSERT INTO shares (id, target_type, target_id, owner_id, \
         recipient_type,
                recipient_user_id, public_token, public_password_hash,
                perm_view, perm_download, perm_edit, perm_delete, \
         perm_reshare, perm_add,
                note, expires_at, access_count, inherit_to_children, \
         parent_share_id,
                created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, \
         ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21)",
        params![
          share.id.0.to_string(),
          share.target.target_type(),
          share.target.target_id().to_string(),
          share.owner_id.0.to_string(),
          recipient_type,
          recipient_user_id,
          public_token,
          password_hash,
          share.permissions.view.can_view,
          share.permissions.view.can_download,
          share.permissions.mutate.can_edit,
          share.permissions.mutate.can_delete,
          share.permissions.view.can_reshare,
          share.permissions.mutate.can_add,
          share.note,
          share.expires_at.map(|dt| dt.to_rfc3339()),
          share.access_count.cast_signed(),
          share.inherit_to_children,
          share.parent_share_id.map(|s| s.0.to_string()),
          share.created_at.to_rfc3339(),
          share.updated_at.to_rfc3339(),
        ],
      )?;
      Ok::<_, PinakesError>(share)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("create_share: {e}")))?
    .map_err(|e| PinakesError::Database(format!("create_share query: {e}")))
  }
  /// Fetch a share by id, decoding the row via the shared `row_to_share`
  /// helper. Errors (rather than `None`) when missing.
  async fn get_share(
    &self,
    id: crate::sharing::ShareId,
  ) -> Result<crate::sharing::Share> {
    let conn = Arc::clone(&self.conn);
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn
        .query_row(
          "SELECT id, target_type, target_id, owner_id, recipient_type, \
           recipient_user_id,
                  public_token, public_password_hash, perm_view, \
           perm_download, perm_edit,
                  perm_delete, perm_reshare, perm_add, note, expires_at, \
           access_count,
                  last_accessed, inherit_to_children, parent_share_id, \
           created_at, updated_at
           FROM shares WHERE id = ?1",
          params![id.0.to_string()],
          row_to_share,
        )
        .map_err(|e| PinakesError::Database(format!("get_share query: {e}")))
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_share: {e}")))?
  }
  /// Fetch a public-link share by its token, decoding the row via the
  /// shared `row_to_share` helper. Errors (rather than `None`) when
  /// missing.
  async fn get_share_by_token(
    &self,
    token: &str,
  ) -> Result<crate::sharing::Share> {
    let conn = Arc::clone(&self.conn);
    let token = token.to_string();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn
        .query_row(
          "SELECT id, target_type, target_id, owner_id, recipient_type, \
           recipient_user_id,
                  public_token, public_password_hash, perm_view, \
           perm_download, perm_edit,
                  perm_delete, perm_reshare, perm_add, note, expires_at, \
           access_count,
                  last_accessed, inherit_to_children, parent_share_id, \
           created_at, updated_at
           FROM shares WHERE public_token = ?1",
          params![&token],
          row_to_share,
        )
        .map_err(|e| {
          PinakesError::Database(format!("get_share_by_token query: {e}"))
        })
    })
    .await
    .map_err(|e| PinakesError::Database(format!("get_share_by_token: {e}")))?
  }
  /// Page through the shares created by `owner_id`, newest first.
  async fn list_shares_by_owner(
    &self,
    owner_id: crate::users::UserId,
    pagination: &Pagination,
  ) -> Result<Vec<crate::sharing::Share>> {
    let conn = Arc::clone(&self.conn);
    let offset = pagination.offset;
    let limit = pagination.limit;
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT id, target_type, target_id, owner_id, recipient_type, \
         recipient_user_id,
                public_token, public_password_hash, perm_view, \
         perm_download, perm_edit,
                perm_delete, perm_reshare, perm_add, note, expires_at, \
         access_count,
                last_accessed, inherit_to_children, parent_share_id, \
         created_at, updated_at
         FROM shares WHERE owner_id = ?1 ORDER BY created_at DESC \
         LIMIT ?2 OFFSET ?3",
      )?;
      let shares = stmt
        .query_map(
          params![
            owner_id.0.to_string(),
            // SQLite binds signed integers; convert from unsigned.
            limit.cast_signed(),
            offset.cast_signed()
          ],
          row_to_share,
        )?
        .collect::<rusqlite::Result<Vec<_>>>()?;
      Ok::<_, PinakesError>(shares)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("list_shares_by_owner: {e}")))?
    .map_err(|e| {
      PinakesError::Database(format!("list_shares_by_owner query: {e}"))
    })
  }
async fn list_shares_for_user(
&self,
user_id: crate::users::UserId,
pagination: &Pagination,
) -> Result<Vec<crate::sharing::Share>> {
let conn = Arc::clone(&self.conn);
let offset = pagination.offset;
let limit = pagination.limit;
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT id, target_type, target_id, owner_id, recipient_type, \
recipient_user_id,
public_token, public_password_hash, perm_view, \
perm_download, perm_edit,
perm_delete, perm_reshare, perm_add, note, expires_at, \
access_count,
last_accessed, inherit_to_children, parent_share_id, \
created_at, updated_at
FROM shares WHERE recipient_user_id = ?1 ORDER BY created_at \
DESC LIMIT ?2 OFFSET ?3",
)?;
let shares = stmt
.query_map(
params![
user_id.0.to_string(),
limit.cast_signed(),
offset.cast_signed()
],
row_to_share,
)?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, PinakesError>(shares)
})
.await
.map_err(|e| PinakesError::Database(format!("list_shares_for_user: {e}")))?
.map_err(|e| {
PinakesError::Database(format!("list_shares_for_user query: {e}"))
})
}
async fn list_shares_for_target(
&self,
target: &crate::sharing::ShareTarget,
) -> Result<Vec<crate::sharing::Share>> {
let conn = Arc::clone(&self.conn);
let target_type = target.target_type().to_string();
let target_id = target.target_id().to_string();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT id, target_type, target_id, owner_id, recipient_type, \
recipient_user_id,
public_token, public_password_hash, perm_view, \
perm_download, perm_edit,
perm_delete, perm_reshare, perm_add, note, expires_at, \
access_count,
last_accessed, inherit_to_children, parent_share_id, \
created_at, updated_at
FROM shares WHERE target_type = ?1 AND target_id = ?2",
)?;
let shares = stmt
.query_map(params![&target_type, &target_id], row_to_share)?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, PinakesError>(shares)
})
.await
.map_err(|e| {
PinakesError::Database(format!("list_shares_for_target: {e}"))
})?
.map_err(|e| {
PinakesError::Database(format!("list_shares_for_target query: {e}"))
})
}
async fn update_share(
&self,
share: &crate::sharing::Share,
) -> Result<crate::sharing::Share> {
let conn = Arc::clone(&self.conn);
let share = share.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE shares SET
perm_view = ?1, perm_download = ?2, perm_edit = ?3, \
perm_delete = ?4,
perm_reshare = ?5, perm_add = ?6, note = ?7, expires_at = \
?8,
inherit_to_children = ?9, updated_at = ?10
WHERE id = ?11",
params![
share.permissions.view.can_view,
share.permissions.view.can_download,
share.permissions.mutate.can_edit,
share.permissions.mutate.can_delete,
share.permissions.view.can_reshare,
share.permissions.mutate.can_add,
share.note,
share.expires_at.map(|dt| dt.to_rfc3339()),
share.inherit_to_children,
share.updated_at.to_rfc3339(),
share.id.0.to_string(),
],
)?;
Ok::<_, PinakesError>(share)
})
.await
.map_err(|e| PinakesError::Database(format!("update_share: {e}")))?
.map_err(|e| PinakesError::Database(format!("update_share query: {e}")))
}
async fn delete_share(&self, id: crate::sharing::ShareId) -> Result<()> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute("DELETE FROM shares WHERE id = ?1", params![
id.0.to_string()
])?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| PinakesError::Database(format!("delete_share: {e}")))??;
Ok(())
}
async fn record_share_access(
&self,
id: crate::sharing::ShareId,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE shares SET access_count = access_count + 1, last_accessed = \
?1 WHERE id = ?2",
params![&now, id.0.to_string()],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("record_share_access: {e}"))
})??;
Ok(())
}
async fn check_share_access(
&self,
user_id: Option<crate::users::UserId>,
target: &crate::sharing::ShareTarget,
) -> Result<Option<crate::sharing::SharePermissions>> {
let shares = self.list_shares_for_target(target).await?;
let now = chrono::Utc::now();
for share in shares {
// Skip expired shares
if let Some(exp) = share.expires_at
&& exp < now
{
continue;
}
match (&share.recipient, user_id) {
// Public links are accessible to anyone
(crate::sharing::ShareRecipient::PublicLink { .. }, _) => {
return Ok(Some(share.permissions));
},
// User shares require matching user
(
crate::sharing::ShareRecipient::User {
user_id: share_user,
},
Some(uid),
) if *share_user == uid => {
return Ok(Some(share.permissions));
},
_ => continue,
}
}
Ok(None)
}
  /// Resolve the share permissions that apply to `media_id` for `user_id`,
  /// walking the inheritance chain: direct media shares first, then shares
  /// on any collection containing the item, then shares on any of its
  /// tags. The first matching share wins; `None` means no share applies.
  async fn get_effective_share_permissions(
    &self,
    user_id: Option<crate::users::UserId>,
    media_id: MediaId,
  ) -> Result<Option<crate::sharing::SharePermissions>> {
    // Check direct media shares
    let target = crate::sharing::ShareTarget::Media { media_id };
    if let Some(perms) = self.check_share_access(user_id, &target).await? {
      return Ok(Some(perms));
    }
    // Check collection shares (inheritance)
    let conn = Arc::clone(&self.conn);
    let media_id_str = media_id.0.to_string();
    let collection_ids: Vec<Uuid> = tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt = conn.prepare(
        "SELECT collection_id FROM collection_items WHERE media_id = ?1",
      )?;
      // Rows whose stored id fails to parse as a UUID are silently
      // skipped (best-effort: one corrupt row should not block access
      // checks for the rest).
      let ids = stmt
        .query_map([&media_id_str], |row| {
          let id_str: String = row.get(0)?;
          Ok(Uuid::parse_str(&id_str).ok())
        })?
        .filter_map(|r| r.ok().flatten())
        .collect::<Vec<Uuid>>();
      Ok::<_, PinakesError>(ids)
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!(
        "get_effective_share_permissions (collections): {e}"
      ))
    })?
    .map_err(|e| {
      PinakesError::Database(format!(
        "get_effective_share_permissions (collections) query: {e}"
      ))
    })?;
    // Each collection membership is checked in turn; the first share that
    // grants access short-circuits the remaining lookups.
    for collection_id in collection_ids {
      let target = crate::sharing::ShareTarget::Collection { collection_id };
      if let Some(perms) = self.check_share_access(user_id, &target).await? {
        return Ok(Some(perms));
      }
    }
    // Check tag shares (inheritance)
    let conn = Arc::clone(&self.conn);
    let media_id_str = media_id.0.to_string();
    let tag_ids: Vec<Uuid> = tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      let mut stmt =
        conn.prepare("SELECT tag_id FROM media_tags WHERE media_id = ?1")?;
      // Same best-effort UUID parsing as the collection lookup above.
      let ids = stmt
        .query_map([&media_id_str], |row| {
          let id_str: String = row.get(0)?;
          Ok(Uuid::parse_str(&id_str).ok())
        })?
        .filter_map(|r| r.ok().flatten())
        .collect::<Vec<Uuid>>();
      Ok::<_, PinakesError>(ids)
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!(
        "get_effective_share_permissions (tags): {e}"
      ))
    })?
    .map_err(|e| {
      PinakesError::Database(format!(
        "get_effective_share_permissions (tags) query: {e}"
      ))
    })?;
    for tag_id in tag_ids {
      let target = crate::sharing::ShareTarget::Tag { tag_id };
      if let Some(perms) = self.check_share_access(user_id, &target).await? {
        return Ok(Some(perms));
      }
    }
    // No direct or inherited share grants access.
    Ok(None)
  }
async fn batch_delete_shares(
&self,
ids: &[crate::sharing::ShareId],
) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let id_strings: Vec<String> =
ids.iter().map(|id| id.0.to_string()).collect();
if id_strings.is_empty() {
return Ok(0);
}
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let placeholders: Vec<String> =
(1..=id_strings.len()).map(|i| format!("?{i}")).collect();
let sql = format!(
"DELETE FROM shares WHERE id IN ({})",
placeholders.join(", ")
);
let params: Vec<&dyn rusqlite::types::ToSql> = id_strings
.iter()
.map(|s| s as &dyn rusqlite::types::ToSql)
.collect();
conn.execute(&sql, &*params).map(|n| n as u64).map_err(|e| {
PinakesError::Database(format!("batch_delete_shares query: {e}"))
})
})
.await
.map_err(|e| PinakesError::Database(format!("batch_delete_shares: {e}")))?
}
async fn cleanup_expired_shares(&self) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.execute(
"DELETE FROM shares WHERE expires_at IS NOT NULL AND expires_at < ?1",
params![&now],
)
.map(|n| n as u64)
.map_err(|e| {
PinakesError::Database(format!("cleanup_expired_shares query: {e}"))
})
})
.await
.map_err(|e| {
PinakesError::Database(format!("cleanup_expired_shares: {e}"))
})?
}
async fn record_share_activity(
&self,
activity: &crate::sharing::ShareActivity,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let activity = activity.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"INSERT INTO share_activity (id, share_id, actor_id, actor_ip, \
action, details, timestamp)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
params![
activity.id.to_string(),
activity.share_id.0.to_string(),
activity.actor_id.map(|u| u.0.to_string()),
activity.actor_ip,
activity.action.to_string(),
activity.details,
activity.timestamp.to_rfc3339(),
],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("record_share_activity: {e}"))
})??;
Ok(())
}
async fn get_share_activity(
&self,
share_id: crate::sharing::ShareId,
pagination: &Pagination,
) -> Result<Vec<crate::sharing::ShareActivity>> {
let conn = Arc::clone(&self.conn);
let offset = pagination.offset;
let limit = pagination.limit;
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT id, share_id, actor_id, actor_ip, action, details, timestamp
FROM share_activity WHERE share_id = ?1 ORDER BY timestamp \
DESC LIMIT ?2 OFFSET ?3",
)?;
let activities = stmt
.query_map(
params![
share_id.0.to_string(),
limit.cast_signed(),
offset.cast_signed()
],
|row| {
Ok(crate::sharing::ShareActivity {
id: parse_uuid(&row.get::<_, String>(0)?)?,
share_id: crate::sharing::ShareId(parse_uuid(
&row.get::<_, String>(1)?,
)?),
actor_id: row.get::<_, Option<String>>(2)?.and_then(|s| {
Uuid::parse_str(&s).ok().map(crate::users::UserId)
}),
actor_ip: row.get(3)?,
action: row
.get::<_, String>(4)?
.parse()
.unwrap_or(crate::sharing::ShareActivityAction::Accessed),
details: row.get(5)?,
timestamp: parse_datetime(&row.get::<_, String>(6)?),
})
},
)?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, PinakesError>(activities)
})
.await
.map_err(|e| PinakesError::Database(format!("get_share_activity: {e}")))?
.map_err(|e| {
PinakesError::Database(format!("get_share_activity query: {e}"))
})
}
async fn create_share_notification(
&self,
notification: &crate::sharing::ShareNotification,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let notification = notification.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"INSERT INTO share_notifications (id, user_id, share_id, \
notification_type, is_read, created_at)
VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
params![
notification.id.to_string(),
notification.user_id.0.to_string(),
notification.share_id.0.to_string(),
notification.notification_type.to_string(),
notification.is_read,
notification.created_at.to_rfc3339(),
],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("create_share_notification: {e}"))
})??;
Ok(())
}
async fn get_unread_notifications(
&self,
user_id: crate::users::UserId,
) -> Result<Vec<crate::sharing::ShareNotification>> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT id, user_id, share_id, notification_type, is_read, created_at
FROM share_notifications WHERE user_id = ?1 AND is_read = 0 \
ORDER BY created_at DESC",
)?;
let notifications = stmt
.query_map(params![user_id.0.to_string()], |row| {
Ok(crate::sharing::ShareNotification {
id: parse_uuid(&row.get::<_, String>(0)?)?,
user_id: crate::users::UserId(parse_uuid(
&row.get::<_, String>(1)?,
)?),
share_id: crate::sharing::ShareId(parse_uuid(
&row.get::<_, String>(2)?,
)?),
notification_type: row
.get::<_, String>(3)?
.parse()
.unwrap_or(crate::sharing::ShareNotificationType::NewShare),
is_read: row.get(4)?,
created_at: parse_datetime(&row.get::<_, String>(5)?),
})
})?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, PinakesError>(notifications)
})
.await
.map_err(|e| {
PinakesError::Database(format!("get_unread_notifications: {e}"))
})?
.map_err(|e| {
PinakesError::Database(format!("get_unread_notifications query: {e}"))
})
}
async fn mark_notification_read(
&self,
id: Uuid,
user_id: crate::users::UserId,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE share_notifications SET is_read = 1 WHERE id = ?1 AND user_id \
= ?2",
params![id.to_string(), user_id.0.to_string()],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("mark_notification_read: {e}"))
})??;
Ok(())
}
async fn mark_all_notifications_read(
&self,
user_id: crate::users::UserId,
) -> Result<()> {
let conn = Arc::clone(&self.conn);
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE share_notifications SET is_read = 1 WHERE user_id = ?1",
params![user_id.0.to_string()],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("mark_all_notifications_read: {e}"))
})??;
Ok(())
}
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
// Validate the new name
if new_name.is_empty() || new_name.contains('/') || new_name.contains('\\')
{
return Err(PinakesError::InvalidOperation(
"Invalid file name: must not be empty or contain path separators"
.into(),
));
}
let conn = Arc::clone(&self.conn);
let id_str = id.0.to_string();
let new_name = new_name.to_string();
let (old_path, storage_mode) = tokio::task::spawn_blocking({
let conn = conn.clone();
let id_str = id_str.clone();
move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let row: (String, String) = conn.query_row(
"SELECT path, storage_mode FROM media_items WHERE id = ?1 AND \
deleted_at IS NULL",
params![id_str],
|row| Ok((row.get(0)?, row.get(1)?)),
)?;
Ok::<_, PinakesError>(row)
}
})
.await
.map_err(|e| {
PinakesError::Database(format!("rename_media (get info): {e}"))
})??;
let old_path_buf = std::path::PathBuf::from(&old_path);
let parent = old_path_buf.parent().unwrap_or(std::path::Path::new(""));
let new_path = parent.join(&new_name);
let new_path_str = new_path.to_string_lossy().to_string();
// For external storage, actually rename the file on disk
if storage_mode == "external" && old_path_buf.exists() {
tokio::fs::rename(&old_path_buf, &new_path)
.await
.map_err(|e| {
PinakesError::Io(std::io::Error::new(
e.kind(),
format!("Failed to rename file: {e}"),
))
})?;
}
// Update the database
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn.execute(
"UPDATE media_items SET file_name = ?1, path = ?2, updated_at = ?3 \
WHERE id = ?4",
params![new_name, new_path_str, now, id_str],
)?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("rename_media (update db): {e}"))
})??;
Ok(old_path)
}
  /// Move a media item into `new_directory` (keeping its file name),
  /// returning the old path. For externally-stored files the file on disk
  /// is moved as well; the target directory is created if missing.
  ///
  /// NOTE(review): the fs move and the DB update are two separate steps —
  /// a DB failure after a successful `rename` leaves the file moved but
  /// the record stale.
  async fn move_media(
    &self,
    id: MediaId,
    new_directory: &std::path::Path,
  ) -> Result<String> {
    let conn = Arc::clone(&self.conn);
    let id_str = id.0.to_string();
    let new_dir = new_directory.to_path_buf();
    // Phase 1: read the item's current path, file name, and storage mode.
    let (old_path, file_name, storage_mode) = tokio::task::spawn_blocking({
      let conn = conn.clone();
      let id_str = id_str.clone();
      move || {
        let conn = conn.lock().map_err(|e| {
          PinakesError::Database(format!("connection mutex poisoned: {e}"))
        })?;
        let row: (String, String, String) = conn.query_row(
          "SELECT path, file_name, storage_mode FROM media_items WHERE id = \
           ?1 AND deleted_at IS NULL",
          params![id_str],
          |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
        )?;
        Ok::<_, PinakesError>(row)
      }
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("move_media (get info): {e}"))
    })??;
    let old_path_buf = std::path::PathBuf::from(&old_path);
    let new_path = new_dir.join(&file_name);
    let new_path_str = new_path.to_string_lossy().to_string();
    // Ensure the target directory exists
    if !new_dir.exists() {
      tokio::fs::create_dir_all(&new_dir).await?;
    }
    // For external storage, actually move the file on disk. Managed blobs
    // are left untouched — only the DB path changes. `rename` works only
    // within a filesystem; a cross-device move surfaces as an I/O error.
    if storage_mode == "external" && old_path_buf.exists() {
      tokio::fs::rename(&old_path_buf, &new_path)
        .await
        .map_err(|e| {
          PinakesError::Io(std::io::Error::new(
            e.kind(),
            format!("Failed to move file: {e}"),
          ))
        })?;
    }
    // Phase 2: update the database record with the new path.
    let now = chrono::Utc::now().to_rfc3339();
    tokio::task::spawn_blocking(move || {
      let conn = conn.lock().map_err(|e| {
        PinakesError::Database(format!("connection mutex poisoned: {e}"))
      })?;
      conn.execute(
        "UPDATE media_items SET path = ?1, updated_at = ?2 WHERE id = ?3",
        params![new_path_str, now, id_str],
      )?;
      Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| {
      PinakesError::Database(format!("move_media (update db): {e}"))
    })??;
    Ok(old_path)
  }
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.0.to_string();
let now = chrono::Utc::now().to_rfc3339();
let rows_affected = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.execute(
"UPDATE media_items SET deleted_at = ?1, updated_at = ?1 WHERE id = \
?2 AND deleted_at IS NULL",
params![now, id_str],
)
.map_err(|e| {
PinakesError::Database(format!("soft_delete_media query: {e}"))
})
})
.await
.map_err(|e| PinakesError::Database(format!("soft_delete_media: {e}")))??;
if rows_affected == 0 {
return Err(PinakesError::NotFound(format!(
"Media item {id} not found or already deleted"
)));
}
Ok(())
}
async fn restore_media(&self, id: MediaId) -> Result<()> {
let conn = Arc::clone(&self.conn);
let id_str = id.0.to_string();
let now = chrono::Utc::now().to_rfc3339();
let rows_affected = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.execute(
"UPDATE media_items SET deleted_at = NULL, updated_at = ?1 WHERE id \
= ?2 AND deleted_at IS NOT NULL",
params![now, id_str],
)
.map_err(|e| {
PinakesError::Database(format!("restore_media query: {e}"))
})
})
.await
.map_err(|e| PinakesError::Database(format!("restore_media: {e}")))??;
if rows_affected == 0 {
return Err(PinakesError::NotFound(format!(
"Media item {id} not found in trash"
)));
}
Ok(())
}
async fn list_trash(
&self,
pagination: &Pagination,
) -> Result<Vec<MediaItem>> {
let conn = Arc::clone(&self.conn);
let offset = pagination.offset;
let limit = pagination.limit;
let items = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT id, path, file_name, media_type, content_hash, file_size,
title, artist, album, genre, year, duration_secs, \
description,
thumbnail_path, created_at, updated_at, file_mtime,
date_taken, latitude, longitude, camera_make, \
camera_model, rating,
storage_mode, original_filename, uploaded_at, \
storage_key,
perceptual_hash, deleted_at
FROM media_items
WHERE deleted_at IS NOT NULL
ORDER BY deleted_at DESC
LIMIT ?1 OFFSET ?2",
)?;
let rows = stmt.query_map(
params![limit.cast_signed(), offset.cast_signed()],
row_to_media_item,
)?;
let mut items = Vec::new();
for row in rows {
items.push(row?);
}
Ok::<_, PinakesError>(items)
})
.await
.map_err(|e| PinakesError::Database(format!("list_trash: {e}")))??;
Ok(items)
}
async fn empty_trash(&self) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let count = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
// First, get the IDs to clean up related data
let mut stmt = conn
.prepare("SELECT id FROM media_items WHERE deleted_at IS NOT NULL")?;
let ids: Vec<String> = stmt
.query_map([], |row| row.get(0))?
.filter_map(std::result::Result::ok)
.collect();
// Delete related data
for id in &ids {
conn
.execute("DELETE FROM media_tags WHERE media_id = ?1", params![id])?;
conn.execute(
"DELETE FROM collection_members WHERE media_id = ?1",
params![id],
)?;
conn
.execute("DELETE FROM custom_fields WHERE media_id = ?1", params![
id
])?;
}
// Delete the media items
let count = conn
.execute("DELETE FROM media_items WHERE deleted_at IS NOT NULL", [])?;
Ok::<_, PinakesError>(count as u64)
})
.await
.map_err(|e| PinakesError::Database(format!("empty_trash: {e}")))??;
Ok(count)
}
async fn purge_old_trash(
&self,
before: chrono::DateTime<chrono::Utc>,
) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let before_str = before.to_rfc3339();
let count = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
// First, get the IDs to clean up related data
let mut stmt = conn.prepare(
"SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND \
deleted_at < ?1",
)?;
let ids: Vec<String> = stmt
.query_map(params![before_str], |row| row.get(0))?
.filter_map(std::result::Result::ok)
.collect();
// Delete related data
for id in &ids {
conn
.execute("DELETE FROM media_tags WHERE media_id = ?1", params![id])?;
conn.execute(
"DELETE FROM collection_members WHERE media_id = ?1",
params![id],
)?;
conn
.execute("DELETE FROM custom_fields WHERE media_id = ?1", params![
id
])?;
}
// Delete the media items
let count = conn.execute(
"DELETE FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at \
< ?1",
params![before_str],
)?;
Ok::<_, PinakesError>(count as u64)
})
.await
.map_err(|e| PinakesError::Database(format!("purge_old_trash: {e}")))??;
Ok(count)
}
async fn count_trash(&self) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let count = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let count: i64 = conn.query_row(
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL",
[],
|row| row.get(0),
)?;
Ok::<_, PinakesError>(count.cast_unsigned())
})
.await
.map_err(|e| PinakesError::Database(format!("count_trash: {e}")))??;
Ok(count)
}
async fn save_markdown_links(
&self,
media_id: MediaId,
links: &[crate::model::MarkdownLink],
) -> Result<()> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
let links: Vec<_> = links.to_vec();
tokio::task::spawn_blocking(move || {
let mut conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
// Wrap DELETE + INSERT in transaction to ensure atomicity
let tx = conn.transaction()?;
// Delete existing links for this source
tx.execute("DELETE FROM markdown_links WHERE source_media_id = ?1", [
&media_id_str,
])?;
// Insert new links
let mut stmt = tx.prepare(
"INSERT INTO markdown_links (
id, source_media_id, target_path, target_media_id,
link_type, link_text, line_number, context, created_at
) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
)?;
for link in &links {
stmt.execute(params![
link.id.to_string(),
media_id_str,
link.target_path,
link.target_media_id.map(|id| id.0.to_string()),
link.link_type.to_string(),
link.link_text,
link.line_number,
link.context,
link.created_at.to_rfc3339(),
])?;
}
// Commit transaction - if this fails, all changes are rolled back
drop(stmt);
tx.commit()?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("save_markdown_links: {e}"))
})??;
Ok(())
}
async fn get_outgoing_links(
&self,
media_id: MediaId,
) -> Result<Vec<crate::model::MarkdownLink>> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
let links = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT id, source_media_id, target_path, target_media_id,
link_type, link_text, line_number, context, created_at
FROM markdown_links
WHERE source_media_id = ?1
ORDER BY line_number",
)?;
let rows = stmt.query_map([&media_id_str], row_to_markdown_link)?;
let mut links = Vec::new();
for row in rows {
links.push(row?);
}
Ok::<_, PinakesError>(links)
})
.await
.map_err(|e| {
PinakesError::Database(format!("get_outgoing_links: {e}"))
})??;
Ok(links)
}
async fn get_backlinks(
&self,
media_id: MediaId,
) -> Result<Vec<crate::model::BacklinkInfo>> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
let backlinks = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let mut stmt = conn.prepare(
"SELECT l.id, l.source_media_id, m.title, m.path,
l.link_text, l.line_number, l.context, l.link_type
FROM markdown_links l
JOIN media_items m ON l.source_media_id = m.id
WHERE l.target_media_id = ?1
ORDER BY m.title, l.line_number",
)?;
let rows = stmt.query_map([&media_id_str], |row| {
let link_id_str: String = row.get(0)?;
let source_id_str: String = row.get(1)?;
let source_title: Option<String> = row.get(2)?;
let source_path: String = row.get(3)?;
let link_text: Option<String> = row.get(4)?;
let line_number: Option<i32> = row.get(5)?;
let context: Option<String> = row.get(6)?;
let link_type_str: String = row.get(7)?;
Ok(crate::model::BacklinkInfo {
link_id: parse_uuid(&link_id_str)?,
source_id: MediaId(parse_uuid(&source_id_str)?),
source_title,
source_path,
link_text,
line_number,
context,
link_type: link_type_str
.parse()
.unwrap_or(crate::model::LinkType::Wikilink),
})
})?;
let mut backlinks = Vec::new();
for row in rows {
backlinks.push(row?);
}
Ok::<_, PinakesError>(backlinks)
})
.await
.map_err(|e| PinakesError::Database(format!("get_backlinks: {e}")))??;
Ok(backlinks)
}
async fn clear_links_for_media(&self, media_id: MediaId) -> Result<()> {
let conn = Arc::clone(&self.conn);
let media_id_str = media_id.0.to_string();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
conn
.execute("DELETE FROM markdown_links WHERE source_media_id = ?1", [
&media_id_str,
])?;
Ok::<_, PinakesError>(())
})
.await
.map_err(|e| {
PinakesError::Database(format!("clear_links_for_media: {e}"))
})??;
Ok(())
}
async fn get_graph_data(
&self,
center_id: Option<MediaId>,
depth: u32,
) -> Result<crate::model::GraphData> {
let conn = Arc::clone(&self.conn);
let center_id_str = center_id.map(|id| id.0.to_string());
let depth = depth.min(5); // Limit depth to prevent huge queries
let graph_data = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| PinakesError::Database(format!("connection mutex poisoned: {e}")))?;
let mut nodes = Vec::new();
let mut edges = Vec::new();
let mut node_ids = rustc_hash::FxHashSet::default();
// Get nodes - either all markdown files or those connected to center
if let Some(center_id) = center_id_str {
// BFS to find connected nodes within depth
let mut frontier = vec![center_id.clone()];
let mut visited = rustc_hash::FxHashSet::default();
visited.insert(center_id);
for _ in 0..depth {
let mut next_frontier = Vec::new();
for node_id in &frontier {
// Get outgoing links
let mut stmt = conn.prepare(
"SELECT target_media_id FROM markdown_links
WHERE source_media_id = ?1 AND target_media_id IS NOT NULL",
)?;
let rows = stmt.query_map([node_id], |row| {
let id: String = row.get(0)?;
Ok(id)
})?;
for row in rows {
let id = row?;
if !visited.contains(&id) {
visited.insert(id.clone());
next_frontier.push(id);
}
}
// Get incoming links
let mut stmt = conn.prepare(
"SELECT source_media_id FROM markdown_links
WHERE target_media_id = ?1",
)?;
let rows = stmt.query_map([node_id], |row| {
let id: String = row.get(0)?;
Ok(id)
})?;
for row in rows {
let id = row?;
if !visited.contains(&id) {
visited.insert(id.clone());
next_frontier.push(id);
}
}
}
frontier = next_frontier;
}
node_ids = visited;
} else {
// Get all markdown files with links (limit to 500 for performance)
let mut stmt = conn.prepare(
"SELECT DISTINCT id FROM media_items
WHERE media_type = 'markdown' AND deleted_at IS NULL
LIMIT 500",
)?;
let rows = stmt.query_map([], |row| {
let id: String = row.get(0)?;
Ok(id)
})?;
for row in rows {
node_ids.insert(row?);
}
}
// Build nodes with metadata
for node_id in &node_ids {
let mut stmt = conn.prepare(
"SELECT id, COALESCE(title, file_name) as label, title, media_type
FROM media_items WHERE id = ?1",
)?;
if let Ok((id, label, title, media_type)) = stmt.query_row([node_id], |row| {
Ok((
row.get::<_, String>(0)?,
row.get::<_, String>(1)?,
row.get::<_, Option<String>>(2)?,
row.get::<_, String>(3)?,
))
}) {
// Count outgoing links
let link_count: i64 = conn.query_row(
"SELECT COUNT(*) FROM markdown_links WHERE source_media_id = ?1",
[&id],
|row| row.get(0),
)?;
// Count incoming links
let backlink_count: i64 = conn.query_row(
"SELECT COUNT(*) FROM markdown_links WHERE target_media_id = ?1",
[&id],
|row| row.get(0),
)?;
nodes.push(crate::model::GraphNode {
id: id.clone(),
label,
title,
media_type,
link_count: u32::try_from(link_count).unwrap_or(0),
backlink_count: u32::try_from(backlink_count).unwrap_or(0),
});
}
}
// Build edges
for node_id in &node_ids {
let mut stmt = conn.prepare(
"SELECT source_media_id, target_media_id, link_type
FROM markdown_links
WHERE source_media_id = ?1 AND target_media_id IS NOT NULL",
)?;
let rows = stmt.query_map([node_id], |row| {
let source: String = row.get(0)?;
let target: String = row.get(1)?;
let link_type_str: String = row.get(2)?;
Ok((source, target, link_type_str))
})?;
for row in rows {
let (source, target, link_type_str) = row?;
if node_ids.contains(&target) {
edges.push(crate::model::GraphEdge {
source,
target,
link_type: link_type_str
.parse()
.unwrap_or(crate::model::LinkType::Wikilink),
});
}
}
}
Ok::<_, PinakesError>(crate::model::GraphData { nodes, edges })
})
.await
.map_err(|e| PinakesError::Database(format!("get_graph_data: {e}")))??;
Ok(graph_data)
}
async fn resolve_links(&self) -> Result<u64> {
    let conn = Arc::clone(&self.conn);
    let resolved = tokio::task::spawn_blocking(move || {
        let db = conn.lock().map_err(|e| {
            PinakesError::Database(format!("connection mutex poisoned: {e}"))
        })?;
        // Pass 1: resolve links whose target_path matches a (non-deleted)
        // media item's full path exactly.
        let by_path = db.execute(
            "UPDATE markdown_links
             SET target_media_id = (
               SELECT id FROM media_items
               WHERE path = markdown_links.target_path
               AND deleted_at IS NULL
               LIMIT 1
             )
             WHERE target_media_id IS NULL
             AND EXISTS (
               SELECT 1 FROM media_items
               WHERE path = markdown_links.target_path
               AND deleted_at IS NULL
             )",
            [],
        )?;
        // Pass 2 (Obsidian-style): for links still unresolved, match the
        // target_path against file names, tolerating a missing or present
        // `.md` extension on either side.
        let by_filename = db.execute(
            "UPDATE markdown_links
             SET target_media_id = (
               SELECT id FROM media_items
               WHERE (file_name = markdown_links.target_path
                 OR file_name = markdown_links.target_path || '.md'
                 OR REPLACE(file_name, '.md', '') = markdown_links.target_path)
               AND deleted_at IS NULL
               LIMIT 1
             )
             WHERE target_media_id IS NULL
             AND EXISTS (
               SELECT 1 FROM media_items
               WHERE (file_name = markdown_links.target_path
                 OR file_name = markdown_links.target_path || '.md'
                 OR REPLACE(file_name, '.md', '') = markdown_links.target_path)
               AND deleted_at IS NULL
             )",
            [],
        )?;
        // The second pass only touches rows the first pass left NULL,
        // so the two counts never overlap.
        let total = by_path + by_filename;
        Ok::<_, PinakesError>(total as u64)
    })
    .await
    .map_err(|e| PinakesError::Database(format!("resolve_links: {e}")))??;
    Ok(resolved)
}
async fn mark_links_extracted(&self, media_id: MediaId) -> Result<()> {
    // Stamp the item's `links_extracted_at` column with the current time
    // in RFC 3339 form.
    let conn = Arc::clone(&self.conn);
    let id_text = media_id.0.to_string();
    let stamp = chrono::Utc::now().to_rfc3339();
    tokio::task::spawn_blocking(move || {
        let db = conn.lock().map_err(|e| {
            PinakesError::Database(format!("connection mutex poisoned: {e}"))
        })?;
        db.execute(
            "UPDATE media_items SET links_extracted_at = ?1 WHERE id = ?2",
            params![stamp, id_text],
        )?;
        Ok::<_, PinakesError>(())
    })
    .await
    .map_err(|e| {
        PinakesError::Database(format!("mark_links_extracted: {e}"))
    })??;
    Ok(())
}
async fn count_unresolved_links(&self) -> Result<u64> {
let conn = Arc::clone(&self.conn);
let count = tokio::task::spawn_blocking(move || {
let conn = conn.lock().map_err(|e| {
PinakesError::Database(format!("connection mutex poisoned: {e}"))
})?;
let count: i64 = conn.query_row(
"SELECT COUNT(*) FROM markdown_links WHERE target_media_id IS NULL",
[],
|row| row.get(0),
)?;
Ok::<_, PinakesError>(count.cast_unsigned())
})
.await
.map_err(|e| {
PinakesError::Database(format!("count_unresolved_links: {e}"))
})??;
Ok(count)
}
async fn backup(&self, dest: &std::path::Path) -> Result<()> {
    // Write a compacted, consistent snapshot of the database to `dest`
    // via SQLite's `VACUUM INTO`. `VACUUM INTO` fails if the destination
    // file already exists.
    let conn = Arc::clone(&self.conn);
    let dest = dest.to_path_buf();
    let fut = tokio::task::spawn_blocking(move || {
        // `VACUUM INTO` receives the destination as SQL text, so the
        // path must be valid UTF-8. Previously this used
        // `to_string_lossy()`, which would silently back up to a
        // lossy-replaced path; reject non-UTF-8 paths instead.
        let dest_str = dest.to_str().ok_or_else(|| {
            PinakesError::Database(
                "backup destination path is not valid UTF-8".into(),
            )
        })?;
        let db = conn.lock().map_err(|e| {
            PinakesError::Database(format!("connection mutex poisoned: {e}"))
        })?;
        db.execute("VACUUM INTO ?1", params![dest_str])?;
        Ok(())
    });
    // NOTE: the timeout only abandons the await — the blocking VACUUM
    // keeps running (and keeps the connection locked) until it finishes.
    tokio::time::timeout(std::time::Duration::from_mins(5), fut)
        .await
        .map_err(|_| PinakesError::Database("backup timed out".into()))?
        .map_err(|e: tokio::task::JoinError| {
            PinakesError::Database(format!("backup: {e}"))
        })?
}
}
// Helper function to parse a markdown link row
fn row_to_markdown_link(
    row: &Row,
) -> rusqlite::Result<crate::model::MarkdownLink> {
    // Expected column order: id, source_media_id, target_path,
    // target_media_id, link_type, link_text, line_number, context,
    // created_at.
    let raw_id: String = row.get(0)?;
    let raw_source: String = row.get(1)?;
    let target_path: String = row.get(2)?;
    let raw_target: Option<String> = row.get(3)?;
    let raw_link_type: String = row.get(4)?;
    let link_text: Option<String> = row.get(5)?;
    let line_number: Option<i32> = row.get(6)?;
    let context: Option<String> = row.get(7)?;
    let raw_created: String = row.get(8)?;

    // Unlike the mandatory id columns, a target id that fails to parse
    // is treated as an unresolved link (None) rather than an error.
    let target_media_id = raw_target
        .and_then(|s| Uuid::parse_str(&s).ok())
        .map(MediaId);
    // Unknown link-type strings degrade to the wikilink default.
    let link_type = raw_link_type
        .parse()
        .unwrap_or(crate::model::LinkType::Wikilink);

    Ok(crate::model::MarkdownLink {
        id: parse_uuid(&raw_id)?,
        source_media_id: MediaId(parse_uuid(&raw_source)?),
        target_path,
        target_media_id,
        link_type,
        link_text,
        line_number,
        context,
        created_at: parse_datetime(&raw_created),
    })
}
// Helper function to parse a share row
fn row_to_share(row: &Row) -> rusqlite::Result<crate::sharing::Share> {
    // Column layout (positional): 0 id, 1 target_type, 2 target_id,
    // 3 owner_id, 4 recipient_type, 5 recipient_user_id, 6 public_token,
    // 7 password_hash, 8..=13 permission flags, 14 note, 15 expires_at,
    // 16 access_count, 17 last_accessed, 18 inherit_to_children,
    // 19 parent_share_id, 20 created_at, 21 updated_at.
    let id_str: String = row.get(0)?;
    let target_type: String = row.get(1)?;
    let target_id_str: String = row.get(2)?;
    let owner_id_str: String = row.get(3)?;
    let recipient_type: String = row.get(4)?;
    let recipient_user_id: Option<String> = row.get(5)?;
    let public_token: Option<String> = row.get(6)?;
    let password_hash: Option<String> = row.get(7)?;
    // Decode the target from its string discriminator. An unrecognized
    // discriminator falls back to a Media target so an unexpected row
    // still deserializes rather than failing the whole query.
    let target = match target_type.as_str() {
        "media" => {
            crate::sharing::ShareTarget::Media {
                media_id: MediaId(parse_uuid(&target_id_str)?),
            }
        },
        "collection" => {
            crate::sharing::ShareTarget::Collection {
                collection_id: parse_uuid(&target_id_str)?,
            }
        },
        "tag" => {
            crate::sharing::ShareTarget::Tag {
                tag_id: parse_uuid(&target_id_str)?,
            }
        },
        "saved_search" => {
            crate::sharing::ShareTarget::SavedSearch {
                search_id: parse_uuid(&target_id_str)?,
            }
        },
        _ => {
            crate::sharing::ShareTarget::Media {
                media_id: MediaId(parse_uuid(&target_id_str)?),
            }
        },
    };
    // Decode the recipient; unknown discriminators fall back to a
    // public-link recipient, mirroring the target fallback above.
    let recipient = match recipient_type.as_str() {
        "public_link" => {
            crate::sharing::ShareRecipient::PublicLink {
                token: public_token.unwrap_or_default(),
                password_hash,
            }
        },
        "user" => {
            crate::sharing::ShareRecipient::User {
                // A user share with a NULL recipient_user_id parses the
                // empty string, which fails parse_uuid and surfaces as a
                // conversion error.
                user_id: crate::users::UserId(parse_uuid(
                    &recipient_user_id.unwrap_or_default(),
                )?),
            }
        },
        "group" => {
            crate::sharing::ShareRecipient::Group {
                // NOTE(review): the group id is not read from this row —
                // Uuid::nil() looks like a placeholder. Confirm whether
                // group shares persist their group id elsewhere.
                group_id: Uuid::nil(),
            }
        },
        _ => {
            crate::sharing::ShareRecipient::PublicLink {
                token: public_token.unwrap_or_default(),
                password_hash,
            }
        },
    };
    Ok(crate::sharing::Share {
        id: crate::sharing::ShareId(parse_uuid(&id_str)?),
        target,
        owner_id: crate::users::UserId(parse_uuid(&owner_id_str)?),
        recipient,
        // Permission flags are NOT contiguous per struct: the view group
        // reads columns 8, 9, and 12; the mutate group reads 10, 11,
        // and 13. The indices follow the SELECT column order, not the
        // struct field order.
        permissions: crate::sharing::SharePermissions {
            view: crate::sharing::ShareViewPermissions {
                can_view: row.get(8)?,
                can_download: row.get(9)?,
                can_reshare: row.get(12)?,
            },
            mutate: crate::sharing::ShareMutatePermissions {
                can_edit: row.get(10)?,
                can_delete: row.get(11)?,
                can_add: row.get(13)?,
            },
        },
        note: row.get(14)?,
        expires_at: row
            .get::<_, Option<String>>(15)?
            .map(|s| parse_datetime(&s)),
        // access_count is stored as a signed integer; reinterpret the
        // bits as unsigned (counts are non-negative in practice).
        access_count: row.get::<_, i64>(16)?.cast_unsigned(),
        last_accessed: row
            .get::<_, Option<String>>(17)?
            .map(|s| parse_datetime(&s)),
        inherit_to_children: row.get(18)?,
        // A malformed parent share id silently degrades to None rather
        // than erroring, unlike the mandatory id columns above.
        parent_share_id: row
            .get::<_, Option<String>>(19)?
            .and_then(|s| Uuid::parse_str(&s).ok().map(crate::sharing::ShareId)),
        created_at: parse_datetime(&row.get::<_, String>(20)?),
        updated_at: parse_datetime(&row.get::<_, String>(21)?),
    })
}
// Needed for `query_row(...).optional()`
use rusqlite::OptionalExtension;