treewide: complete book management interface

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: If5a21f16221f3c56a8008e139f93edc46a6a6964
Author: raf
Date: 2026-02-04 23:14:37 +03:00
Commit: 2f31242442
Signed by: NotAShelf (GPG key ID: 29D95B64378DB4BF)
23 changed files with 1693 additions and 126 deletions


@@ -1734,11 +1734,7 @@ impl StorageBackend for SqliteBackend {
[],
|r| r.get(0),
)?;
let avg_size: u64 = if total_media > 0 {
total_size / total_media
} else {
0
};
let avg_size: u64 = total_size.checked_div(total_media).unwrap_or(0);
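// `checked_div` yields `None` when `total_media` is 0, so `unwrap_or(0)`
// reports an average size of 0 for an empty library instead of panicking.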
// Media count by type
let mut stmt = db.prepare("SELECT media_type, COUNT(*) FROM media_items GROUP BY media_type ORDER BY COUNT(*) DESC")?;
@@ -3801,6 +3797,543 @@ impl StorageBackend for SqliteBackend {
.map_err(|_| PinakesError::Database("list_active_sessions timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))?
}
// Book Management Methods
async fn upsert_book_metadata(&self, metadata: &crate::model::BookMetadata) -> Result<()> {
let conn = self.conn.clone();
let media_id_str = metadata.media_id.to_string();
let isbn = metadata.isbn.clone();
let isbn13 = metadata.isbn13.clone();
let publisher = metadata.publisher.clone();
let language = metadata.language.clone();
let page_count = metadata.page_count;
let publication_date = metadata.publication_date.map(|d| d.to_string());
let series_name = metadata.series_name.clone();
let series_index = metadata.series_index;
let format = metadata.format.clone();
let authors = metadata.authors.clone();
let identifiers = metadata.identifiers.clone();
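// rusqlite calls block, so the work runs via `spawn_blocking` and the await
// further down is capped by a 30-second timeout rather than stalling the runtime.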
let fut = tokio::task::spawn_blocking(move || {
let mut conn = conn.lock().unwrap();
let tx = conn.transaction()?;
// Upsert book_metadata
tx.execute(
"INSERT INTO book_metadata (
media_id, isbn, isbn13, publisher, language, page_count,
publication_date, series_name, series_index, format
) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)
ON CONFLICT(media_id) DO UPDATE SET
isbn = ?2, isbn13 = ?3, publisher = ?4, language = ?5,
page_count = ?6, publication_date = ?7, series_name = ?8,
series_index = ?9, format = ?10, updated_at = datetime('now')",
rusqlite::params![
media_id_str,
isbn,
isbn13,
publisher,
language,
page_count,
publication_date,
series_name,
series_index,
format
],
)?;
// Clear existing authors and identifiers
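// The upsert replaces both lists wholesale; running the deletes and re-inserts
// inside one transaction keeps the swap atomic.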
tx.execute(
"DELETE FROM book_authors WHERE media_id = ?1",
[&media_id_str],
)?;
tx.execute(
"DELETE FROM book_identifiers WHERE media_id = ?1",
[&media_id_str],
)?;
// Insert authors
for author in &authors {
tx.execute(
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
VALUES (?1, ?2, ?3, ?4, ?5)",
rusqlite::params![
media_id_str,
author.name,
author.file_as,
author.role,
author.position
],
)?;
}
// Insert identifiers
for (id_type, values) in &identifiers {
for value in values {
tx.execute(
"INSERT INTO book_identifiers (media_id, identifier_type, identifier_value)
VALUES (?1, ?2, ?3)",
rusqlite::params![media_id_str, id_type, value],
)?;
}
}
tx.commit()?;
Ok::<_, rusqlite::Error>(())
});
tokio::time::timeout(std::time::Duration::from_secs(30), fut)
.await
.map_err(|_| PinakesError::Database("upsert_book_metadata timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??;
Ok(())
}
async fn get_book_metadata(
&self,
media_id: MediaId,
) -> Result<Option<crate::model::BookMetadata>> {
let conn = self.conn.clone();
let media_id_str = media_id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
// Get base book metadata
let metadata_row = conn
.query_row(
"SELECT isbn, isbn13, publisher, language, page_count,
publication_date, series_name, series_index, format,
created_at, updated_at
FROM book_metadata WHERE media_id = ?1",
[&media_id_str],
|row| {
Ok((
row.get::<_, Option<String>>(0)?,
row.get::<_, Option<String>>(1)?,
row.get::<_, Option<String>>(2)?,
row.get::<_, Option<String>>(3)?,
row.get::<_, Option<i32>>(4)?,
row.get::<_, Option<String>>(5)?,
row.get::<_, Option<String>>(6)?,
row.get::<_, Option<f64>>(7)?,
row.get::<_, Option<String>>(8)?,
row.get::<_, String>(9)?,
row.get::<_, String>(10)?,
))
},
)
.optional()?;
let Some((
isbn,
isbn13,
publisher,
language,
page_count,
publication_date,
series_name,
series_index,
format,
created_at,
updated_at,
)) = metadata_row
else {
return Ok::<_, rusqlite::Error>(None);
};
// Get authors
let mut stmt = conn.prepare(
"SELECT author_name, author_sort, role, position
FROM book_authors WHERE media_id = ?1 ORDER BY position",
)?;
let authors: Vec<crate::model::AuthorInfo> = stmt
.query_map([&media_id_str], |row| {
Ok(crate::model::AuthorInfo {
name: row.get(0)?,
file_as: row.get(1)?,
role: row.get(2)?,
position: row.get(3)?,
})
})?
.collect::<rusqlite::Result<Vec<_>>>()?;
// Get identifiers
let mut stmt = conn.prepare(
"SELECT identifier_type, identifier_value
FROM book_identifiers WHERE media_id = ?1",
)?;
let mut identifiers: std::collections::HashMap<String, Vec<String>> =
std::collections::HashMap::new();
for row in stmt.query_map([&media_id_str], |row| {
Ok((row.get::<_, String>(0)?, row.get::<_, String>(1)?))
})? {
let (id_type, value) = row?;
identifiers.entry(id_type).or_default().push(value);
}
let parsed_date = publication_date
.and_then(|d| chrono::NaiveDate::parse_from_str(&d, "%Y-%m-%d").ok());
Ok(Some(crate::model::BookMetadata {
media_id,
isbn,
isbn13,
publisher,
language,
page_count,
publication_date: parsed_date,
series_name,
series_index,
format,
authors,
identifiers,
// `updated_at` is rewritten with `datetime('now')` ("YYYY-MM-DD HH:MM:SS", UTC),
// which is not RFC 3339, so accept both formats instead of panicking.
created_at: chrono::DateTime::parse_from_rfc3339(&created_at)
.map(|dt| dt.with_timezone(&chrono::Utc))
.or_else(|_| chrono::NaiveDateTime::parse_from_str(&created_at, "%Y-%m-%d %H:%M:%S").map(|dt| dt.and_utc()))
.unwrap_or_else(|_| chrono::Utc::now()),
updated_at: chrono::DateTime::parse_from_rfc3339(&updated_at)
.map(|dt| dt.with_timezone(&chrono::Utc))
.or_else(|_| chrono::NaiveDateTime::parse_from_str(&updated_at, "%Y-%m-%d %H:%M:%S").map(|dt| dt.and_utc()))
.unwrap_or_else(|_| chrono::Utc::now()),
}))
});
Ok(
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("get_book_metadata timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
)
}
async fn add_book_author(
&self,
media_id: MediaId,
author: &crate::model::AuthorInfo,
) -> Result<()> {
let conn = self.conn.clone();
let media_id_str = media_id.to_string();
let author_clone = author.clone();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
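// The ON CONFLICT target below assumes a UNIQUE constraint on
// (media_id, author_name, role); SQLite rejects the statement otherwise.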
conn.execute(
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT(media_id, author_name, role) DO UPDATE SET
author_sort = ?3, position = ?5",
rusqlite::params![
media_id_str,
author_clone.name,
author_clone.file_as,
author_clone.role,
author_clone.position
],
)?;
Ok::<_, rusqlite::Error>(())
});
tokio::time::timeout(std::time::Duration::from_secs(5), fut)
.await
.map_err(|_| PinakesError::Database("add_book_author timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??;
Ok(())
}
async fn get_book_authors(&self, media_id: MediaId) -> Result<Vec<crate::model::AuthorInfo>> {
let conn = self.conn.clone();
let media_id_str = media_id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT author_name, author_sort, role, position
FROM book_authors WHERE media_id = ?1 ORDER BY position",
)?;
let authors: Vec<crate::model::AuthorInfo> = stmt
.query_map([&media_id_str], |row| {
Ok(crate::model::AuthorInfo {
name: row.get(0)?,
file_as: row.get(1)?,
role: row.get(2)?,
position: row.get(3)?,
})
})?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, rusqlite::Error>(authors)
});
Ok(tokio::time::timeout(std::time::Duration::from_secs(5), fut)
.await
.map_err(|_| PinakesError::Database("get_book_authors timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??)
}
async fn list_all_authors(&self, pagination: &Pagination) -> Result<Vec<(String, u64)>> {
let conn = self.conn.clone();
let offset = pagination.offset;
let limit = pagination.limit;
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
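// COUNT(DISTINCT media_id) so an author credited with several roles on the
// same book is still counted once per book.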
let mut stmt = conn.prepare(
"SELECT author_name, COUNT(DISTINCT media_id) as book_count
FROM book_authors
GROUP BY author_name
ORDER BY book_count DESC, author_name
LIMIT ?1 OFFSET ?2",
)?;
let authors: Vec<(String, u64)> = stmt
.query_map([limit as i64, offset as i64], |row| {
Ok((row.get(0)?, row.get::<_, i64>(1)? as u64))
})?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, rusqlite::Error>(authors)
});
Ok(
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("list_all_authors timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
)
}
async fn list_series(&self) -> Result<Vec<(String, u64)>> {
let conn = self.conn.clone();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT series_name, COUNT(*) as book_count
FROM book_metadata
WHERE series_name IS NOT NULL
GROUP BY series_name
ORDER BY series_name",
)?;
let series: Vec<(String, u64)> = stmt
.query_map([], |row| Ok((row.get(0)?, row.get::<_, i64>(1)? as u64)))?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, rusqlite::Error>(series)
});
Ok(
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("list_series timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
)
}
async fn get_series_books(&self, series_name: &str) -> Result<Vec<MediaItem>> {
let conn = self.conn.clone();
let series = series_name.to_string();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata b ON m.id = b.media_id
WHERE b.series_name = ?1
ORDER BY b.series_index, m.title",
)?;
let items = stmt
.query_map([&series], row_to_media_item)?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, rusqlite::Error>(items)
});
Ok(
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("get_series_books timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
)
}
async fn update_reading_progress(
&self,
user_id: uuid::Uuid,
media_id: MediaId,
current_page: i32,
) -> Result<()> {
// Reuse watch_history table: progress_secs stores current page for books
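// The page number is stored as f64 in progress_secs and read back with
// `as i32` in get_reading_progress, so no schema change is needed.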
let conn = self.conn.clone();
let user_id_str = user_id.to_string();
let media_id_str = media_id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
conn.execute(
"INSERT INTO watch_history (user_id, media_id, progress_secs, last_watched_at)
VALUES (?1, ?2, ?3, datetime('now'))
ON CONFLICT(user_id, media_id) DO UPDATE SET
progress_secs = ?3, last_watched_at = datetime('now')",
rusqlite::params![user_id_str, media_id_str, current_page as f64],
)?;
Ok::<_, rusqlite::Error>(())
});
tokio::time::timeout(std::time::Duration::from_secs(5), fut)
.await
.map_err(|_| PinakesError::Database("update_reading_progress timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??;
Ok(())
}
async fn get_reading_progress(
&self,
user_id: uuid::Uuid,
media_id: MediaId,
) -> Result<Option<crate::model::ReadingProgress>> {
let conn = self.conn.clone();
let user_id_str = user_id.to_string();
let media_id_str = media_id.to_string();
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let result = conn
.query_row(
"SELECT wh.progress_secs, bm.page_count, wh.last_watched_at
FROM watch_history wh
LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id
WHERE wh.user_id = ?1 AND wh.media_id = ?2",
[&user_id_str, &media_id_str],
|row| {
let current_page = row.get::<_, f64>(0)? as i32;
let total_pages = row.get::<_, Option<i32>>(1)?;
let last_read_str = row.get::<_, String>(2)?;
Ok((current_page, total_pages, last_read_str))
},
)
.optional()?;
Ok::<_, rusqlite::Error>(result.map(|(current_page, total_pages, last_read_str)| {
crate::model::ReadingProgress {
media_id,
user_id,
current_page,
total_pages,
progress_percent: if let Some(total) = total_pages {
if total > 0 {
(current_page as f64 / total as f64 * 100.0).min(100.0)
} else {
0.0
}
} else {
0.0
},
// `update_reading_progress` stores `datetime('now')` ("YYYY-MM-DD HH:MM:SS",
// UTC), which is not RFC 3339, so accept both formats instead of panicking.
last_read_at: chrono::NaiveDateTime::parse_from_str(&last_read_str, "%Y-%m-%d %H:%M:%S")
.map(|dt| dt.and_utc())
.or_else(|_| chrono::DateTime::parse_from_rfc3339(&last_read_str).map(|dt| dt.with_timezone(&chrono::Utc)))
.unwrap_or_else(|_| chrono::Utc::now()),
}
}))
});
Ok(tokio::time::timeout(std::time::Duration::from_secs(5), fut)
.await
.map_err(|_| PinakesError::Database("get_reading_progress timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??)
}
async fn get_reading_list(
&self,
_user_id: uuid::Uuid,
_status: Option<crate::model::ReadingStatus>,
) -> Result<Vec<MediaItem>> {
// TODO: Implement reading list with explicit status tracking
// For now, return empty list as this requires additional schema
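// A likely shape (hypothetical, not part of this commit): a reading_list table
// keyed on (user_id, media_id) with a status column mirroring ReadingStatus.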
Ok(Vec::new())
}
#[allow(clippy::too_many_arguments)]
async fn search_books(
&self,
isbn: Option<&str>,
author: Option<&str>,
series: Option<&str>,
publisher: Option<&str>,
language: Option<&str>,
pagination: &Pagination,
) -> Result<Vec<MediaItem>> {
let conn = self.conn.clone();
let isbn = isbn.map(String::from);
let author = author.map(String::from);
let series = series.map(String::from);
let publisher = publisher.map(String::from);
let language = language.map(String::from);
let offset = pagination.offset;
let limit = pagination.limit;
let fut = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let mut query = String::from(
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id",
);
let mut conditions = Vec::new();
let mut params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
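// Filters are optional, so the SQL and its bind values are built up together;
// boxing keeps the owned parameter values alive until `query_map` borrows them
// through `params_refs` below.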
if let Some(ref i) = isbn {
conditions.push("(bm.isbn = ? OR bm.isbn13 = ?)");
params.push(Box::new(i.clone()));
params.push(Box::new(i.clone()));
}
if let Some(ref a) = author {
query.push_str(" INNER JOIN book_authors ba ON m.id = ba.media_id");
conditions.push("ba.author_name LIKE ?");
params.push(Box::new(format!("%{}%", a)));
}
if let Some(ref s) = series {
conditions.push("bm.series_name LIKE ?");
params.push(Box::new(format!("%{}%", s)));
}
if let Some(ref p) = publisher {
conditions.push("bm.publisher LIKE ?");
params.push(Box::new(format!("%{}%", p)));
}
if let Some(ref l) = language {
conditions.push("bm.language = ?");
params.push(Box::new(l.clone()));
}
if !conditions.is_empty() {
query.push_str(" WHERE ");
query.push_str(&conditions.join(" AND "));
}
query.push_str(" ORDER BY m.title LIMIT ? OFFSET ?");
params.push(Box::new(limit as i64));
params.push(Box::new(offset as i64));
let params_refs: Vec<&dyn rusqlite::ToSql> =
params.iter().map(|p| p.as_ref()).collect();
let mut stmt = conn.prepare(&query)?;
let items = stmt
.query_map(&*params_refs, row_to_media_item)?
.collect::<rusqlite::Result<Vec<_>>>()?;
Ok::<_, rusqlite::Error>(items)
});
Ok(
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
.await
.map_err(|_| PinakesError::Database("search_books timed out".into()))?
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
)
}
}
// Needed for `query_row(...).optional()`