treewide: complete book management interface

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: If5a21f16221f3c56a8008e139f93edc46a6a6964
This commit is contained in:
raf 2026-02-04 23:14:37 +03:00
commit 2f31242442
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
23 changed files with 1693 additions and 126 deletions

View file

@@ -3445,6 +3445,476 @@ impl StorageBackend for PostgresBackend {
})
.collect())
}
// Book Management Methods
/// Insert or replace all book-specific metadata for one media item.
///
/// The base `book_metadata` row is upserted, then the dependent
/// `book_authors` and `book_identifiers` rows are deleted and re-inserted
/// from `metadata`, keeping the child tables exactly in sync with the
/// struct. Everything runs inside a single transaction so concurrent
/// readers never observe a half-updated book.
async fn upsert_book_metadata(&self, metadata: &crate::model::BookMetadata) -> Result<()> {
// Connection must be `mut` to open a transaction on it.
let mut client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let tx = client.transaction().await?;
// Upsert book_metadata
tx.execute(
"INSERT INTO book_metadata (
media_id, isbn, isbn13, publisher, language, page_count,
publication_date, series_name, series_index, format
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT(media_id) DO UPDATE SET
isbn = $2, isbn13 = $3, publisher = $4, language = $5,
page_count = $6, publication_date = $7, series_name = $8,
series_index = $9, format = $10, updated_at = NOW()",
&[
&metadata.media_id.0,
&metadata.isbn,
&metadata.isbn13,
&metadata.publisher,
&metadata.language,
&metadata.page_count,
&metadata.publication_date,
&metadata.series_name,
&metadata.series_index,
&metadata.format,
],
)
.await?;
// Clear existing authors and identifiers
// (replace-all strategy: simpler than diffing, and safe because the
// deletes and re-inserts commit atomically with the upsert above).
tx.execute(
"DELETE FROM book_authors WHERE media_id = $1",
&[&metadata.media_id.0],
)
.await?;
tx.execute(
"DELETE FROM book_identifiers WHERE media_id = $1",
&[&metadata.media_id.0],
)
.await?;
// Insert authors
for author in &metadata.authors {
tx.execute(
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
VALUES ($1, $2, $3, $4, $5)",
&[
&metadata.media_id.0,
&author.name,
&author.file_as,
&author.role,
&author.position,
],
)
.await?;
}
// Insert identifiers
// `identifiers` maps an identifier type to a list of values; each
// (type, value) pair becomes one row.
for (id_type, values) in &metadata.identifiers {
for value in values {
tx.execute(
"INSERT INTO book_identifiers (media_id, identifier_type, identifier_value)
VALUES ($1, $2, $3)",
&[&metadata.media_id.0, &id_type, &value],
)
.await?;
}
}
// Commit only after every child row is written.
tx.commit().await?;
Ok(())
}
/// Fetch the full book metadata for `media_id`, including the ordered
/// author list and the identifier map (identifier type -> values).
///
/// Returns `Ok(None)` when the media item has no `book_metadata` row.
async fn get_book_metadata(
    &self,
    media_id: MediaId,
) -> Result<Option<crate::model::BookMetadata>> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Base row; bail out early when the item is not a book.
    let row = client
        .query_opt(
            "SELECT isbn, isbn13, publisher, language, page_count,
            publication_date, series_name, series_index, format,
            created_at, updated_at
            FROM book_metadata WHERE media_id = $1",
            &[&media_id.0],
        )
        .await?;
    // `let else` early return replaces the prior is_none()/unwrap() pair.
    let Some(row) = row else {
        return Ok(None);
    };
    // Authors in their stored display order.
    let author_rows = client
        .query(
            "SELECT author_name, author_sort, role, position
            FROM book_authors WHERE media_id = $1 ORDER BY position",
            &[&media_id.0],
        )
        .await?;
    let authors: Vec<crate::model::AuthorInfo> = author_rows
        .iter()
        .map(|r| crate::model::AuthorInfo {
            name: r.get(0),
            file_as: r.get(1),
            role: r.get(2),
            position: r.get(3),
        })
        .collect();
    // Identifiers grouped by type (e.g. one type may have several values).
    let id_rows = client
        .query(
            "SELECT identifier_type, identifier_value
            FROM book_identifiers WHERE media_id = $1",
            &[&media_id.0],
        )
        .await?;
    let mut identifiers: std::collections::HashMap<String, Vec<String>> =
        std::collections::HashMap::new();
    for r in id_rows {
        let id_type: String = r.get(0);
        let value: String = r.get(1);
        identifiers.entry(id_type).or_default().push(value);
    }
    Ok(Some(crate::model::BookMetadata {
        media_id,
        isbn: row.get(0),
        isbn13: row.get(1),
        publisher: row.get(2),
        language: row.get(3),
        page_count: row.get(4),
        publication_date: row.get(5),
        series_name: row.get(6),
        series_index: row.get(7),
        format: row.get(8),
        authors,
        identifiers,
        created_at: row.get(9),
        updated_at: row.get(10),
    }))
}
/// Attach (or refresh) one author record on a media item.
///
/// `(media_id, author_name, role)` acts as the conflict key: re-adding
/// the same author/role pair updates its sort name and position rather
/// than duplicating the row.
async fn add_book_author(
    &self,
    media_id: MediaId,
    author: &crate::model::AuthorInfo,
) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let sql = "INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
        VALUES ($1, $2, $3, $4, $5)
        ON CONFLICT(media_id, author_name, role) DO UPDATE SET
        author_sort = $3, position = $5";
    conn.execute(
        sql,
        &[
            &media_id.0,
            &author.name,
            &author.file_as,
            &author.role,
            &author.position,
        ],
    )
    .await?;
    Ok(())
}
/// List the authors recorded for a media item, ordered by their stored
/// position.
async fn get_book_authors(&self, media_id: MediaId) -> Result<Vec<crate::model::AuthorInfo>> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let rows = conn
        .query(
            "SELECT author_name, author_sort, role, position
            FROM book_authors WHERE media_id = $1 ORDER BY position",
            &[&media_id.0],
        )
        .await?;
    let mut authors = Vec::with_capacity(rows.len());
    for row in &rows {
        authors.push(crate::model::AuthorInfo {
            name: row.get(0),
            file_as: row.get(1),
            role: row.get(2),
            position: row.get(3),
        });
    }
    Ok(authors)
}
/// Page through every distinct author together with the number of books
/// they appear on, most prolific first (ties broken alphabetically).
async fn list_all_authors(&self, pagination: &Pagination) -> Result<Vec<(String, u64)>> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let limit = pagination.limit as i64;
    let offset = pagination.offset as i64;
    let rows = conn
        .query(
            "SELECT author_name, COUNT(DISTINCT media_id) as book_count
            FROM book_authors
            GROUP BY author_name
            ORDER BY book_count DESC, author_name
            LIMIT $1 OFFSET $2",
            &[&limit, &offset],
        )
        .await?;
    let mut out = Vec::with_capacity(rows.len());
    for row in &rows {
        let count: i64 = row.get(1);
        out.push((row.get::<_, String>(0), count as u64));
    }
    Ok(out)
}
/// Enumerate every series that has at least one book, alphabetically,
/// with the number of books each series contains.
async fn list_series(&self) -> Result<Vec<(String, u64)>> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let rows = conn
        .query(
            "SELECT series_name, COUNT(*) as book_count
            FROM book_metadata
            WHERE series_name IS NOT NULL
            GROUP BY series_name
            ORDER BY series_name",
            &[],
        )
        .await?;
    let mut series = Vec::with_capacity(rows.len());
    for row in &rows {
        let count: i64 = row.get(1);
        series.push((row.get::<_, String>(0), count as u64));
    }
    Ok(series)
}
async fn get_series_books(&self, series_name: &str) -> Result<Vec<MediaItem>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata b ON m.id = b.media_id
WHERE b.series_name = $1
ORDER BY b.series_index, m.title",
&[&series_name],
)
.await?;
rows.iter().map(row_to_media_item).collect()
}
/// Record how far a user has read a book.
///
/// NOTE(review): progress is stored in the shared `watch_history` table,
/// with the page number reused as `progress_secs` — confirm this
/// overloading is intentional before adding a dedicated column.
async fn update_reading_progress(
    &self,
    user_id: uuid::Uuid,
    media_id: MediaId,
    current_page: i32,
) -> Result<()> {
    let conn = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Lossless widening: every i32 is exactly representable as f64.
    let page_as_secs = f64::from(current_page);
    conn.execute(
        "INSERT INTO watch_history (user_id, media_id, progress_secs, last_watched_at)
        VALUES ($1, $2, $3, NOW())
        ON CONFLICT(user_id, media_id) DO UPDATE SET
        progress_secs = $3, last_watched_at = NOW()",
        &[&user_id, &media_id.0, &page_as_secs],
    )
    .await?;
    Ok(())
}
/// Fetch a user's reading progress for one book, derived from the shared
/// `watch_history` table (the page number is stored in `progress_secs`;
/// see `update_reading_progress`).
///
/// Returns `Ok(None)` when the user has no history row for this media.
async fn get_reading_progress(
&self,
user_id: uuid::Uuid,
media_id: MediaId,
) -> Result<Option<crate::model::ReadingProgress>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
// LEFT JOIN so progress still comes back for items without a
// book_metadata row; page_count is then NULL.
let row = client
.query_opt(
"SELECT wh.progress_secs, bm.page_count, wh.last_watched_at
FROM watch_history wh
LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id
WHERE wh.user_id = $1 AND wh.media_id = $2",
&[&user_id, &media_id.0],
)
.await?;
Ok(row.map(|r| {
// progress_secs doubles as the page number for books; truncate the
// stored float back to a page index.
let current_page = r.get::<_, f64>(0) as i32;
let total_pages: Option<i32> = r.get(1);
// Percent is capped at 100; an unknown or zero page count yields 0.
let progress_percent = if let Some(total) = total_pages {
if total > 0 {
(current_page as f64 / total as f64 * 100.0).min(100.0)
} else {
0.0
}
} else {
0.0
};
crate::model::ReadingProgress {
media_id,
user_id,
current_page,
total_pages,
progress_percent,
last_read_at: r.get(2),
}
}))
}
/// Placeholder for the user's reading list.
///
/// Explicit status tracking (to-read / reading / finished) needs a
/// dedicated table; until that schema lands, every query reports an
/// empty list regardless of the requested status.
async fn get_reading_list(
    &self,
    _user_id: uuid::Uuid,
    _status: Option<crate::model::ReadingStatus>,
) -> Result<Vec<MediaItem>> {
    Ok(vec![])
}
#[allow(clippy::too_many_arguments)]
async fn search_books(
&self,
isbn: Option<&str>,
author: Option<&str>,
series: Option<&str>,
publisher: Option<&str>,
language: Option<&str>,
pagination: &Pagination,
) -> Result<Vec<MediaItem>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
// For PostgreSQL, we need to handle parameters carefully due to lifetimes
// Simplified approach: use separate queries for different filter combinations
let rows = if let (Some(i), Some(a), Some(s), Some(p), Some(l)) =
(isbn, author, series, publisher, language)
{
let author_pattern = format!("%{}%", a);
let series_pattern = format!("%{}%", s);
let publisher_pattern = format!("%{}%", p);
client
.query(
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id
INNER JOIN book_authors ba ON m.id = ba.media_id
WHERE (bm.isbn = $1 OR bm.isbn13 = $1) AND ba.author_name ILIKE $2
AND bm.series_name ILIKE $3 AND bm.publisher ILIKE $4 AND bm.language = $5
ORDER BY m.title LIMIT $6 OFFSET $7",
&[
&i,
&author_pattern,
&series_pattern,
&publisher_pattern,
&l,
&(pagination.limit as i64),
&(pagination.offset as i64),
],
)
.await?
} else if isbn.is_none()
&& author.is_none()
&& series.is_none()
&& publisher.is_none()
&& language.is_none()
{
// No filters
client
.query(
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id
ORDER BY m.title LIMIT $1 OFFSET $2",
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
} else {
// For other combinations, use dynamic query (simplified - just filter by what's provided)
let mut query =
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
m.created_at, m.updated_at
FROM media_items m
INNER JOIN book_metadata bm ON m.id = bm.media_id WHERE 1=1"
.to_string();
if isbn.is_some() {
query.push_str(" AND (bm.isbn = $1 OR bm.isbn13 = $1)");
}
query.push_str(" ORDER BY m.title LIMIT $2 OFFSET $3");
if let Some(i) = isbn {
client
.query(
&query,
&[&i, &(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
} else {
client
.query(
&query,
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await?
}
};
let items: Result<Vec<_>> = rows.iter().map(row_to_media_item).collect();
items
}
}
impl PostgresBackend {