treewide: complete book management interface
Signed-off-by: NotAShelf <raf@notashelf.dev> Change-Id: If5a21f16221f3c56a8008e139f93edc46a6a6964
This commit is contained in:
parent
bda36ac152
commit
2f31242442
23 changed files with 1693 additions and 126 deletions
|
|
@ -144,6 +144,7 @@ pub trait StorageBackend: Send + Sync + 'static {
|
|||
async fn list_media_paths(&self) -> Result<Vec<(MediaId, std::path::PathBuf, ContentHash)>>;
|
||||
|
||||
// Batch metadata update
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn batch_update_media(
|
||||
&self,
|
||||
ids: &[MediaId],
|
||||
|
|
@ -446,6 +447,69 @@ pub trait StorageBackend: Send + Sync + 'static {
|
|||
|
||||
/// List all active sessions (optionally filtered by username)
|
||||
async fn list_active_sessions(&self, username: Option<&str>) -> Result<Vec<SessionData>>;
|
||||
|
||||
// Book Management Methods
|
||||
|
||||
/// Upsert book metadata for a media item
|
||||
async fn upsert_book_metadata(&self, metadata: &crate::model::BookMetadata) -> Result<()>;
|
||||
|
||||
/// Get book metadata for a media item
|
||||
async fn get_book_metadata(
|
||||
&self,
|
||||
media_id: MediaId,
|
||||
) -> Result<Option<crate::model::BookMetadata>>;
|
||||
|
||||
/// Add an author to a book
|
||||
async fn add_book_author(
|
||||
&self,
|
||||
media_id: MediaId,
|
||||
author: &crate::model::AuthorInfo,
|
||||
) -> Result<()>;
|
||||
|
||||
/// Get all authors for a book
|
||||
async fn get_book_authors(&self, media_id: MediaId) -> Result<Vec<crate::model::AuthorInfo>>;
|
||||
|
||||
/// List all distinct authors with book counts
|
||||
async fn list_all_authors(&self, pagination: &Pagination) -> Result<Vec<(String, u64)>>;
|
||||
|
||||
/// List all series with book counts
|
||||
async fn list_series(&self) -> Result<Vec<(String, u64)>>;
|
||||
|
||||
/// Get all books in a series, ordered by series_index
|
||||
async fn get_series_books(&self, series_name: &str) -> Result<Vec<MediaItem>>;
|
||||
|
||||
/// Update reading progress for a user and book
|
||||
async fn update_reading_progress(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
media_id: MediaId,
|
||||
current_page: i32,
|
||||
) -> Result<()>;
|
||||
|
||||
/// Get reading progress for a user and book
|
||||
async fn get_reading_progress(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
media_id: MediaId,
|
||||
) -> Result<Option<crate::model::ReadingProgress>>;
|
||||
|
||||
/// Get reading list for a user filtered by status
|
||||
async fn get_reading_list(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
status: Option<crate::model::ReadingStatus>,
|
||||
) -> Result<Vec<MediaItem>>;
|
||||
|
||||
/// Search books with book-specific criteria
|
||||
async fn search_books(
|
||||
&self,
|
||||
isbn: Option<&str>,
|
||||
author: Option<&str>,
|
||||
series: Option<&str>,
|
||||
publisher: Option<&str>,
|
||||
language: Option<&str>,
|
||||
pagination: &Pagination,
|
||||
) -> Result<Vec<MediaItem>>;
|
||||
}
|
||||
|
||||
/// Comprehensive library statistics.
|
||||
|
|
|
|||
|
|
@ -3445,6 +3445,476 @@ impl StorageBackend for PostgresBackend {
|
|||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
// Book Management Methods
|
||||
|
||||
async fn upsert_book_metadata(&self, metadata: &crate::model::BookMetadata) -> Result<()> {
|
||||
let mut client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let tx = client.transaction().await?;
|
||||
|
||||
// Upsert book_metadata
|
||||
tx.execute(
|
||||
"INSERT INTO book_metadata (
|
||||
media_id, isbn, isbn13, publisher, language, page_count,
|
||||
publication_date, series_name, series_index, format
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
|
||||
ON CONFLICT(media_id) DO UPDATE SET
|
||||
isbn = $2, isbn13 = $3, publisher = $4, language = $5,
|
||||
page_count = $6, publication_date = $7, series_name = $8,
|
||||
series_index = $9, format = $10, updated_at = NOW()",
|
||||
&[
|
||||
&metadata.media_id.0,
|
||||
&metadata.isbn,
|
||||
&metadata.isbn13,
|
||||
&metadata.publisher,
|
||||
&metadata.language,
|
||||
&metadata.page_count,
|
||||
&metadata.publication_date,
|
||||
&metadata.series_name,
|
||||
&metadata.series_index,
|
||||
&metadata.format,
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Clear existing authors and identifiers
|
||||
tx.execute(
|
||||
"DELETE FROM book_authors WHERE media_id = $1",
|
||||
&[&metadata.media_id.0],
|
||||
)
|
||||
.await?;
|
||||
tx.execute(
|
||||
"DELETE FROM book_identifiers WHERE media_id = $1",
|
||||
&[&metadata.media_id.0],
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Insert authors
|
||||
for author in &metadata.authors {
|
||||
tx.execute(
|
||||
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
|
||||
VALUES ($1, $2, $3, $4, $5)",
|
||||
&[
|
||||
&metadata.media_id.0,
|
||||
&author.name,
|
||||
&author.file_as,
|
||||
&author.role,
|
||||
&author.position,
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Insert identifiers
|
||||
for (id_type, values) in &metadata.identifiers {
|
||||
for value in values {
|
||||
tx.execute(
|
||||
"INSERT INTO book_identifiers (media_id, identifier_type, identifier_value)
|
||||
VALUES ($1, $2, $3)",
|
||||
&[&metadata.media_id.0, &id_type, &value],
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
tx.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_book_metadata(
|
||||
&self,
|
||||
media_id: MediaId,
|
||||
) -> Result<Option<crate::model::BookMetadata>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
// Get base book metadata
|
||||
let row = client
|
||||
.query_opt(
|
||||
"SELECT isbn, isbn13, publisher, language, page_count,
|
||||
publication_date, series_name, series_index, format,
|
||||
created_at, updated_at
|
||||
FROM book_metadata WHERE media_id = $1",
|
||||
&[&media_id.0],
|
||||
)
|
||||
.await?;
|
||||
|
||||
if row.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let row = row.unwrap();
|
||||
|
||||
// Get authors
|
||||
let author_rows = client
|
||||
.query(
|
||||
"SELECT author_name, author_sort, role, position
|
||||
FROM book_authors WHERE media_id = $1 ORDER BY position",
|
||||
&[&media_id.0],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let authors: Vec<crate::model::AuthorInfo> = author_rows
|
||||
.iter()
|
||||
.map(|r| crate::model::AuthorInfo {
|
||||
name: r.get(0),
|
||||
file_as: r.get(1),
|
||||
role: r.get(2),
|
||||
position: r.get(3),
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Get identifiers
|
||||
let id_rows = client
|
||||
.query(
|
||||
"SELECT identifier_type, identifier_value
|
||||
FROM book_identifiers WHERE media_id = $1",
|
||||
&[&media_id.0],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut identifiers: std::collections::HashMap<String, Vec<String>> =
|
||||
std::collections::HashMap::new();
|
||||
for r in id_rows {
|
||||
let id_type: String = r.get(0);
|
||||
let value: String = r.get(1);
|
||||
identifiers.entry(id_type).or_default().push(value);
|
||||
}
|
||||
|
||||
Ok(Some(crate::model::BookMetadata {
|
||||
media_id,
|
||||
isbn: row.get(0),
|
||||
isbn13: row.get(1),
|
||||
publisher: row.get(2),
|
||||
language: row.get(3),
|
||||
page_count: row.get(4),
|
||||
publication_date: row.get(5),
|
||||
series_name: row.get(6),
|
||||
series_index: row.get(7),
|
||||
format: row.get(8),
|
||||
authors,
|
||||
identifiers,
|
||||
created_at: row.get(9),
|
||||
updated_at: row.get(10),
|
||||
}))
|
||||
}
|
||||
|
||||
async fn add_book_author(
|
||||
&self,
|
||||
media_id: MediaId,
|
||||
author: &crate::model::AuthorInfo,
|
||||
) -> Result<()> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
client
|
||||
.execute(
|
||||
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT(media_id, author_name, role) DO UPDATE SET
|
||||
author_sort = $3, position = $5",
|
||||
&[
|
||||
&media_id.0,
|
||||
&author.name,
|
||||
&author.file_as,
|
||||
&author.role,
|
||||
&author.position,
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_book_authors(&self, media_id: MediaId) -> Result<Vec<crate::model::AuthorInfo>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows = client
|
||||
.query(
|
||||
"SELECT author_name, author_sort, role, position
|
||||
FROM book_authors WHERE media_id = $1 ORDER BY position",
|
||||
&[&media_id.0],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(rows
|
||||
.iter()
|
||||
.map(|r| crate::model::AuthorInfo {
|
||||
name: r.get(0),
|
||||
file_as: r.get(1),
|
||||
role: r.get(2),
|
||||
position: r.get(3),
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn list_all_authors(&self, pagination: &Pagination) -> Result<Vec<(String, u64)>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows = client
|
||||
.query(
|
||||
"SELECT author_name, COUNT(DISTINCT media_id) as book_count
|
||||
FROM book_authors
|
||||
GROUP BY author_name
|
||||
ORDER BY book_count DESC, author_name
|
||||
LIMIT $1 OFFSET $2",
|
||||
&[&(pagination.limit as i64), &(pagination.offset as i64)],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(rows
|
||||
.iter()
|
||||
.map(|r| (r.get(0), r.get::<_, i64>(1) as u64))
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn list_series(&self) -> Result<Vec<(String, u64)>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows = client
|
||||
.query(
|
||||
"SELECT series_name, COUNT(*) as book_count
|
||||
FROM book_metadata
|
||||
WHERE series_name IS NOT NULL
|
||||
GROUP BY series_name
|
||||
ORDER BY series_name",
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(rows
|
||||
.iter()
|
||||
.map(|r| (r.get(0), r.get::<_, i64>(1) as u64))
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn get_series_books(&self, series_name: &str) -> Result<Vec<MediaItem>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows = client
|
||||
.query(
|
||||
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash,
|
||||
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
|
||||
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
|
||||
m.created_at, m.updated_at
|
||||
FROM media_items m
|
||||
INNER JOIN book_metadata b ON m.id = b.media_id
|
||||
WHERE b.series_name = $1
|
||||
ORDER BY b.series_index, m.title",
|
||||
&[&series_name],
|
||||
)
|
||||
.await?;
|
||||
|
||||
rows.iter().map(row_to_media_item).collect()
|
||||
}
|
||||
|
||||
async fn update_reading_progress(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
media_id: MediaId,
|
||||
current_page: i32,
|
||||
) -> Result<()> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
client
|
||||
.execute(
|
||||
"INSERT INTO watch_history (user_id, media_id, progress_secs, last_watched_at)
|
||||
VALUES ($1, $2, $3, NOW())
|
||||
ON CONFLICT(user_id, media_id) DO UPDATE SET
|
||||
progress_secs = $3, last_watched_at = NOW()",
|
||||
&[&user_id, &media_id.0, &(current_page as f64)],
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_reading_progress(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
media_id: MediaId,
|
||||
) -> Result<Option<crate::model::ReadingProgress>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let row = client
|
||||
.query_opt(
|
||||
"SELECT wh.progress_secs, bm.page_count, wh.last_watched_at
|
||||
FROM watch_history wh
|
||||
LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id
|
||||
WHERE wh.user_id = $1 AND wh.media_id = $2",
|
||||
&[&user_id, &media_id.0],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(row.map(|r| {
|
||||
let current_page = r.get::<_, f64>(0) as i32;
|
||||
let total_pages: Option<i32> = r.get(1);
|
||||
let progress_percent = if let Some(total) = total_pages {
|
||||
if total > 0 {
|
||||
(current_page as f64 / total as f64 * 100.0).min(100.0)
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
crate::model::ReadingProgress {
|
||||
media_id,
|
||||
user_id,
|
||||
current_page,
|
||||
total_pages,
|
||||
progress_percent,
|
||||
last_read_at: r.get(2),
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn get_reading_list(
|
||||
&self,
|
||||
_user_id: uuid::Uuid,
|
||||
_status: Option<crate::model::ReadingStatus>,
|
||||
) -> Result<Vec<MediaItem>> {
|
||||
// TODO: Implement reading list with explicit status tracking
|
||||
// For now, return empty list as this requires additional schema
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn search_books(
|
||||
&self,
|
||||
isbn: Option<&str>,
|
||||
author: Option<&str>,
|
||||
series: Option<&str>,
|
||||
publisher: Option<&str>,
|
||||
language: Option<&str>,
|
||||
pagination: &Pagination,
|
||||
) -> Result<Vec<MediaItem>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
// For PostgreSQL, we need to handle parameters carefully due to lifetimes
|
||||
// Simplified approach: use separate queries for different filter combinations
|
||||
let rows = if let (Some(i), Some(a), Some(s), Some(p), Some(l)) =
|
||||
(isbn, author, series, publisher, language)
|
||||
{
|
||||
let author_pattern = format!("%{}%", a);
|
||||
let series_pattern = format!("%{}%", s);
|
||||
let publisher_pattern = format!("%{}%", p);
|
||||
client
|
||||
.query(
|
||||
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
|
||||
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
|
||||
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
|
||||
m.created_at, m.updated_at
|
||||
FROM media_items m
|
||||
INNER JOIN book_metadata bm ON m.id = bm.media_id
|
||||
INNER JOIN book_authors ba ON m.id = ba.media_id
|
||||
WHERE (bm.isbn = $1 OR bm.isbn13 = $1) AND ba.author_name ILIKE $2
|
||||
AND bm.series_name ILIKE $3 AND bm.publisher ILIKE $4 AND bm.language = $5
|
||||
ORDER BY m.title LIMIT $6 OFFSET $7",
|
||||
&[
|
||||
&i,
|
||||
&author_pattern,
|
||||
&series_pattern,
|
||||
&publisher_pattern,
|
||||
&l,
|
||||
&(pagination.limit as i64),
|
||||
&(pagination.offset as i64),
|
||||
],
|
||||
)
|
||||
.await?
|
||||
} else if isbn.is_none()
|
||||
&& author.is_none()
|
||||
&& series.is_none()
|
||||
&& publisher.is_none()
|
||||
&& language.is_none()
|
||||
{
|
||||
// No filters
|
||||
client
|
||||
.query(
|
||||
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
|
||||
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
|
||||
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
|
||||
m.created_at, m.updated_at
|
||||
FROM media_items m
|
||||
INNER JOIN book_metadata bm ON m.id = bm.media_id
|
||||
ORDER BY m.title LIMIT $1 OFFSET $2",
|
||||
&[&(pagination.limit as i64), &(pagination.offset as i64)],
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
// For other combinations, use dynamic query (simplified - just filter by what's provided)
|
||||
let mut query =
|
||||
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
|
||||
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
|
||||
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
|
||||
m.created_at, m.updated_at
|
||||
FROM media_items m
|
||||
INNER JOIN book_metadata bm ON m.id = bm.media_id WHERE 1=1"
|
||||
.to_string();
|
||||
|
||||
if isbn.is_some() {
|
||||
query.push_str(" AND (bm.isbn = $1 OR bm.isbn13 = $1)");
|
||||
}
|
||||
query.push_str(" ORDER BY m.title LIMIT $2 OFFSET $3");
|
||||
|
||||
if let Some(i) = isbn {
|
||||
client
|
||||
.query(
|
||||
&query,
|
||||
&[&i, &(pagination.limit as i64), &(pagination.offset as i64)],
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
client
|
||||
.query(
|
||||
&query,
|
||||
&[&(pagination.limit as i64), &(pagination.offset as i64)],
|
||||
)
|
||||
.await?
|
||||
}
|
||||
};
|
||||
|
||||
let items: Result<Vec<_>> = rows.iter().map(row_to_media_item).collect();
|
||||
items
|
||||
}
|
||||
}
|
||||
|
||||
impl PostgresBackend {
|
||||
|
|
|
|||
|
|
@ -1734,11 +1734,7 @@ impl StorageBackend for SqliteBackend {
|
|||
[],
|
||||
|r| r.get(0),
|
||||
)?;
|
||||
let avg_size: u64 = if total_media > 0 {
|
||||
total_size / total_media
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let avg_size: u64 = total_size.checked_div(total_media).unwrap_or(0);
|
||||
|
||||
// Media count by type
|
||||
let mut stmt = db.prepare("SELECT media_type, COUNT(*) FROM media_items GROUP BY media_type ORDER BY COUNT(*) DESC")?;
|
||||
|
|
@ -3801,6 +3797,543 @@ impl StorageBackend for SqliteBackend {
|
|||
.map_err(|_| PinakesError::Database("list_active_sessions timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))?
|
||||
}
|
||||
|
||||
// Book Management Methods
|
||||
|
||||
async fn upsert_book_metadata(&self, metadata: &crate::model::BookMetadata) -> Result<()> {
|
||||
let conn = self.conn.clone();
|
||||
let media_id_str = metadata.media_id.to_string();
|
||||
let isbn = metadata.isbn.clone();
|
||||
let isbn13 = metadata.isbn13.clone();
|
||||
let publisher = metadata.publisher.clone();
|
||||
let language = metadata.language.clone();
|
||||
let page_count = metadata.page_count;
|
||||
let publication_date = metadata.publication_date.map(|d| d.to_string());
|
||||
let series_name = metadata.series_name.clone();
|
||||
let series_index = metadata.series_index;
|
||||
let format = metadata.format.clone();
|
||||
let authors = metadata.authors.clone();
|
||||
let identifiers = metadata.identifiers.clone();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let mut conn = conn.lock().unwrap();
|
||||
let tx = conn.transaction()?;
|
||||
|
||||
// Upsert book_metadata
|
||||
tx.execute(
|
||||
"INSERT INTO book_metadata (
|
||||
media_id, isbn, isbn13, publisher, language, page_count,
|
||||
publication_date, series_name, series_index, format
|
||||
) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)
|
||||
ON CONFLICT(media_id) DO UPDATE SET
|
||||
isbn = ?2, isbn13 = ?3, publisher = ?4, language = ?5,
|
||||
page_count = ?6, publication_date = ?7, series_name = ?8,
|
||||
series_index = ?9, format = ?10, updated_at = datetime('now')",
|
||||
rusqlite::params![
|
||||
media_id_str,
|
||||
isbn,
|
||||
isbn13,
|
||||
publisher,
|
||||
language,
|
||||
page_count,
|
||||
publication_date,
|
||||
series_name,
|
||||
series_index,
|
||||
format
|
||||
],
|
||||
)?;
|
||||
|
||||
// Clear existing authors and identifiers
|
||||
tx.execute(
|
||||
"DELETE FROM book_authors WHERE media_id = ?1",
|
||||
[&media_id_str],
|
||||
)?;
|
||||
tx.execute(
|
||||
"DELETE FROM book_identifiers WHERE media_id = ?1",
|
||||
[&media_id_str],
|
||||
)?;
|
||||
|
||||
// Insert authors
|
||||
for author in &authors {
|
||||
tx.execute(
|
||||
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5)",
|
||||
rusqlite::params![
|
||||
media_id_str,
|
||||
author.name,
|
||||
author.file_as,
|
||||
author.role,
|
||||
author.position
|
||||
],
|
||||
)?;
|
||||
}
|
||||
|
||||
// Insert identifiers
|
||||
for (id_type, values) in &identifiers {
|
||||
for value in values {
|
||||
tx.execute(
|
||||
"INSERT INTO book_identifiers (media_id, identifier_type, identifier_value)
|
||||
VALUES (?1, ?2, ?3)",
|
||||
rusqlite::params![media_id_str, id_type, value],
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
tx.commit()?;
|
||||
Ok::<_, rusqlite::Error>(())
|
||||
});
|
||||
|
||||
tokio::time::timeout(std::time::Duration::from_secs(30), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("upsert_book_metadata timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_book_metadata(
|
||||
&self,
|
||||
media_id: MediaId,
|
||||
) -> Result<Option<crate::model::BookMetadata>> {
|
||||
let conn = self.conn.clone();
|
||||
let media_id_str = media_id.to_string();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
|
||||
// Get base book metadata
|
||||
let metadata_row = conn
|
||||
.query_row(
|
||||
"SELECT isbn, isbn13, publisher, language, page_count,
|
||||
publication_date, series_name, series_index, format,
|
||||
created_at, updated_at
|
||||
FROM book_metadata WHERE media_id = ?1",
|
||||
[&media_id_str],
|
||||
|row| {
|
||||
Ok((
|
||||
row.get::<_, Option<String>>(0)?,
|
||||
row.get::<_, Option<String>>(1)?,
|
||||
row.get::<_, Option<String>>(2)?,
|
||||
row.get::<_, Option<String>>(3)?,
|
||||
row.get::<_, Option<i32>>(4)?,
|
||||
row.get::<_, Option<String>>(5)?,
|
||||
row.get::<_, Option<String>>(6)?,
|
||||
row.get::<_, Option<f64>>(7)?,
|
||||
row.get::<_, Option<String>>(8)?,
|
||||
row.get::<_, String>(9)?,
|
||||
row.get::<_, String>(10)?,
|
||||
))
|
||||
},
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
if metadata_row.is_none() {
|
||||
return Ok::<_, rusqlite::Error>(None);
|
||||
}
|
||||
|
||||
let (
|
||||
isbn,
|
||||
isbn13,
|
||||
publisher,
|
||||
language,
|
||||
page_count,
|
||||
publication_date,
|
||||
series_name,
|
||||
series_index,
|
||||
format,
|
||||
created_at,
|
||||
updated_at,
|
||||
) = metadata_row.unwrap();
|
||||
|
||||
// Get authors
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT author_name, author_sort, role, position
|
||||
FROM book_authors WHERE media_id = ?1 ORDER BY position",
|
||||
)?;
|
||||
let authors: Vec<crate::model::AuthorInfo> = stmt
|
||||
.query_map([&media_id_str], |row| {
|
||||
Ok(crate::model::AuthorInfo {
|
||||
name: row.get(0)?,
|
||||
file_as: row.get(1)?,
|
||||
role: row.get(2)?,
|
||||
position: row.get(3)?,
|
||||
})
|
||||
})?
|
||||
.collect::<rusqlite::Result<Vec<_>>>()?;
|
||||
|
||||
// Get identifiers
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT identifier_type, identifier_value
|
||||
FROM book_identifiers WHERE media_id = ?1",
|
||||
)?;
|
||||
let mut identifiers: std::collections::HashMap<String, Vec<String>> =
|
||||
std::collections::HashMap::new();
|
||||
for row in stmt.query_map([&media_id_str], |row| {
|
||||
Ok((row.get::<_, String>(0)?, row.get::<_, String>(1)?))
|
||||
})? {
|
||||
let (id_type, value) = row?;
|
||||
identifiers.entry(id_type).or_default().push(value);
|
||||
}
|
||||
|
||||
let parsed_date = publication_date
|
||||
.and_then(|d| chrono::NaiveDate::parse_from_str(&d, "%Y-%m-%d").ok());
|
||||
|
||||
Ok(Some(crate::model::BookMetadata {
|
||||
media_id,
|
||||
isbn,
|
||||
isbn13,
|
||||
publisher,
|
||||
language,
|
||||
page_count,
|
||||
publication_date: parsed_date,
|
||||
series_name,
|
||||
series_index,
|
||||
format,
|
||||
authors,
|
||||
identifiers,
|
||||
created_at: chrono::DateTime::parse_from_rfc3339(&created_at)
|
||||
.unwrap()
|
||||
.with_timezone(&chrono::Utc),
|
||||
updated_at: chrono::DateTime::parse_from_rfc3339(&updated_at)
|
||||
.unwrap()
|
||||
.with_timezone(&chrono::Utc),
|
||||
}))
|
||||
});
|
||||
|
||||
Ok(
|
||||
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("get_book_metadata timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
|
||||
)
|
||||
}
|
||||
|
||||
async fn add_book_author(
|
||||
&self,
|
||||
media_id: MediaId,
|
||||
author: &crate::model::AuthorInfo,
|
||||
) -> Result<()> {
|
||||
let conn = self.conn.clone();
|
||||
let media_id_str = media_id.to_string();
|
||||
let author_clone = author.clone();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO book_authors (media_id, author_name, author_sort, role, position)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5)
|
||||
ON CONFLICT(media_id, author_name, role) DO UPDATE SET
|
||||
author_sort = ?3, position = ?5",
|
||||
rusqlite::params![
|
||||
media_id_str,
|
||||
author_clone.name,
|
||||
author_clone.file_as,
|
||||
author_clone.role,
|
||||
author_clone.position
|
||||
],
|
||||
)?;
|
||||
Ok::<_, rusqlite::Error>(())
|
||||
});
|
||||
|
||||
tokio::time::timeout(std::time::Duration::from_secs(5), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("add_book_author timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_book_authors(&self, media_id: MediaId) -> Result<Vec<crate::model::AuthorInfo>> {
|
||||
let conn = self.conn.clone();
|
||||
let media_id_str = media_id.to_string();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT author_name, author_sort, role, position
|
||||
FROM book_authors WHERE media_id = ?1 ORDER BY position",
|
||||
)?;
|
||||
let authors: Vec<crate::model::AuthorInfo> = stmt
|
||||
.query_map([&media_id_str], |row| {
|
||||
Ok(crate::model::AuthorInfo {
|
||||
name: row.get(0)?,
|
||||
file_as: row.get(1)?,
|
||||
role: row.get(2)?,
|
||||
position: row.get(3)?,
|
||||
})
|
||||
})?
|
||||
.collect::<rusqlite::Result<Vec<_>>>()?;
|
||||
Ok::<_, rusqlite::Error>(authors)
|
||||
});
|
||||
|
||||
Ok(tokio::time::timeout(std::time::Duration::from_secs(5), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("get_book_authors timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??)
|
||||
}
|
||||
|
||||
async fn list_all_authors(&self, pagination: &Pagination) -> Result<Vec<(String, u64)>> {
|
||||
let conn = self.conn.clone();
|
||||
let offset = pagination.offset;
|
||||
let limit = pagination.limit;
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT author_name, COUNT(DISTINCT media_id) as book_count
|
||||
FROM book_authors
|
||||
GROUP BY author_name
|
||||
ORDER BY book_count DESC, author_name
|
||||
LIMIT ?1 OFFSET ?2",
|
||||
)?;
|
||||
let authors: Vec<(String, u64)> = stmt
|
||||
.query_map([limit as i64, offset as i64], |row| {
|
||||
Ok((row.get(0)?, row.get::<_, i64>(1)? as u64))
|
||||
})?
|
||||
.collect::<rusqlite::Result<Vec<_>>>()?;
|
||||
Ok::<_, rusqlite::Error>(authors)
|
||||
});
|
||||
|
||||
Ok(
|
||||
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("list_all_authors timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
|
||||
)
|
||||
}
|
||||
|
||||
async fn list_series(&self) -> Result<Vec<(String, u64)>> {
|
||||
let conn = self.conn.clone();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT series_name, COUNT(*) as book_count
|
||||
FROM book_metadata
|
||||
WHERE series_name IS NOT NULL
|
||||
GROUP BY series_name
|
||||
ORDER BY series_name",
|
||||
)?;
|
||||
let series: Vec<(String, u64)> = stmt
|
||||
.query_map([], |row| Ok((row.get(0)?, row.get::<_, i64>(1)? as u64)))?
|
||||
.collect::<rusqlite::Result<Vec<_>>>()?;
|
||||
Ok::<_, rusqlite::Error>(series)
|
||||
});
|
||||
|
||||
Ok(
|
||||
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("list_series timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
|
||||
)
|
||||
}
|
||||
|
||||
async fn get_series_books(&self, series_name: &str) -> Result<Vec<MediaItem>> {
|
||||
let conn = self.conn.clone();
|
||||
let series = series_name.to_string();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash,
|
||||
m.file_size, m.title, m.artist, m.album, m.genre, m.year,
|
||||
m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
|
||||
m.created_at, m.updated_at
|
||||
FROM media_items m
|
||||
INNER JOIN book_metadata b ON m.id = b.media_id
|
||||
WHERE b.series_name = ?1
|
||||
ORDER BY b.series_index, m.title",
|
||||
)?;
|
||||
let items = stmt
|
||||
.query_map([&series], row_to_media_item)?
|
||||
.collect::<rusqlite::Result<Vec<_>>>()?;
|
||||
Ok::<_, rusqlite::Error>(items)
|
||||
});
|
||||
|
||||
Ok(
|
||||
tokio::time::timeout(std::time::Duration::from_secs(10), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("get_series_books timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
|
||||
)
|
||||
}
|
||||
|
||||
async fn update_reading_progress(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
media_id: MediaId,
|
||||
current_page: i32,
|
||||
) -> Result<()> {
|
||||
// Reuse watch_history table: progress_secs stores current page for books
|
||||
let conn = self.conn.clone();
|
||||
let user_id_str = user_id.to_string();
|
||||
let media_id_str = media_id.to_string();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO watch_history (user_id, media_id, progress_secs, last_watched_at)
|
||||
VALUES (?1, ?2, ?3, datetime('now'))
|
||||
ON CONFLICT(user_id, media_id) DO UPDATE SET
|
||||
progress_secs = ?3, last_watched_at = datetime('now')",
|
||||
rusqlite::params![user_id_str, media_id_str, current_page as f64],
|
||||
)?;
|
||||
Ok::<_, rusqlite::Error>(())
|
||||
});
|
||||
|
||||
tokio::time::timeout(std::time::Duration::from_secs(5), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("update_reading_progress timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_reading_progress(
|
||||
&self,
|
||||
user_id: uuid::Uuid,
|
||||
media_id: MediaId,
|
||||
) -> Result<Option<crate::model::ReadingProgress>> {
|
||||
let conn = self.conn.clone();
|
||||
let user_id_str = user_id.to_string();
|
||||
let media_id_str = media_id.to_string();
|
||||
|
||||
let fut = tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let result = conn
|
||||
.query_row(
|
||||
"SELECT wh.progress_secs, bm.page_count, wh.last_watched_at
|
||||
FROM watch_history wh
|
||||
LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id
|
||||
WHERE wh.user_id = ?1 AND wh.media_id = ?2",
|
||||
[&user_id_str, &media_id_str],
|
||||
|row| {
|
||||
let current_page = row.get::<_, f64>(0)? as i32;
|
||||
let total_pages = row.get::<_, Option<i32>>(1)?;
|
||||
let last_read_str = row.get::<_, String>(2)?;
|
||||
Ok((current_page, total_pages, last_read_str))
|
||||
},
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
Ok::<_, rusqlite::Error>(result.map(|(current_page, total_pages, last_read_str)| {
|
||||
crate::model::ReadingProgress {
|
||||
media_id,
|
||||
user_id,
|
||||
current_page,
|
||||
total_pages,
|
||||
progress_percent: if let Some(total) = total_pages {
|
||||
if total > 0 {
|
||||
(current_page as f64 / total as f64 * 100.0).min(100.0)
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
} else {
|
||||
0.0
|
||||
},
|
||||
last_read_at: chrono::DateTime::parse_from_rfc3339(&last_read_str)
|
||||
.unwrap()
|
||||
.with_timezone(&chrono::Utc),
|
||||
}
|
||||
}))
|
||||
});
|
||||
|
||||
Ok(tokio::time::timeout(std::time::Duration::from_secs(5), fut)
|
||||
.await
|
||||
.map_err(|_| PinakesError::Database("get_reading_progress timed out".into()))?
|
||||
.map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??)
|
||||
}
|
||||
|
||||
async fn get_reading_list(
|
||||
&self,
|
||||
_user_id: uuid::Uuid,
|
||||
_status: Option<crate::model::ReadingStatus>,
|
||||
) -> Result<Vec<MediaItem>> {
|
||||
// TODO: Implement reading list with explicit status tracking
|
||||
// For now, return empty list as this requires additional schema
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
    /// Search books by bibliographic criteria, AND-combining every provided filter.
    ///
    /// * `isbn` — exact match against either `isbn` or `isbn13`.
    /// * `author` / `series` / `publisher` — case-per-collation substring match (LIKE).
    /// * `language` — exact match.
    /// * `pagination` — LIMIT/OFFSET applied after ordering by title.
    ///
    /// The query is assembled dynamically; the order in which values are pushed
    /// onto `params` MUST mirror the order `?` placeholders appear in the final
    /// SQL text (conditions first in declaration order, then LIMIT, then OFFSET).
    #[allow(clippy::too_many_arguments)]
    async fn search_books(
        &self,
        isbn: Option<&str>,
        author: Option<&str>,
        series: Option<&str>,
        publisher: Option<&str>,
        language: Option<&str>,
        pagination: &Pagination,
    ) -> Result<Vec<MediaItem>> {
        let conn = self.conn.clone();
        // Convert borrowed filters to owned Strings so they can move into
        // the 'static spawn_blocking closure.
        let isbn = isbn.map(String::from);
        let author = author.map(String::from);
        let series = series.map(String::from);
        let publisher = publisher.map(String::from);
        let language = language.map(String::from);
        let offset = pagination.offset;
        let limit = pagination.limit;

        let fut = tokio::task::spawn_blocking(move || {
            let conn = conn.lock().unwrap();

            // Base query: only media items that have book metadata qualify.
            // DISTINCT guards against row duplication from the optional
            // book_authors join below (a book can have several authors).
            let mut query = String::from(
                "SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, m.content_hash,
                        m.file_size, m.title, m.artist, m.album, m.genre, m.year,
                        m.duration_secs, m.description, m.thumbnail_path, m.file_mtime,
                        m.created_at, m.updated_at
                 FROM media_items m
                 INNER JOIN book_metadata bm ON m.id = bm.media_id",
            );

            let mut conditions = Vec::new();
            // Boxed because the bound values have heterogeneous concrete types.
            let mut params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();

            if let Some(ref i) = isbn {
                // Same value bound twice: once for isbn, once for isbn13.
                conditions.push("(bm.isbn = ? OR bm.isbn13 = ?)");
                params.push(Box::new(i.clone()));
                params.push(Box::new(i.clone()));
            }
            if let Some(ref a) = author {
                // Author filter needs an extra join; appended to `query` before
                // the WHERE clause is attached, so the SQL stays well-formed.
                query.push_str(" INNER JOIN book_authors ba ON m.id = ba.media_id");
                conditions.push("ba.author_name LIKE ?");
                params.push(Box::new(format!("%{}%", a)));
            }
            if let Some(ref s) = series {
                conditions.push("bm.series_name LIKE ?");
                params.push(Box::new(format!("%{}%", s)));
            }
            if let Some(ref p) = publisher {
                conditions.push("bm.publisher LIKE ?");
                params.push(Box::new(format!("%{}%", p)));
            }
            if let Some(ref l) = language {
                conditions.push("bm.language = ?");
                params.push(Box::new(l.clone()));
            }

            if !conditions.is_empty() {
                query.push_str(" WHERE ");
                query.push_str(&conditions.join(" AND "));
            }

            // Pagination placeholders come last, matching the two pushes below.
            query.push_str(" ORDER BY m.title LIMIT ? OFFSET ?");
            params.push(Box::new(limit as i64));
            params.push(Box::new(offset as i64));

            // rusqlite wants &[&dyn ToSql]; borrow the boxed values.
            let params_refs: Vec<&dyn rusqlite::ToSql> =
                params.iter().map(|p| p.as_ref()).collect();

            let mut stmt = conn.prepare(&query)?;
            let items = stmt
                .query_map(&*params_refs, row_to_media_item)?
                .collect::<rusqlite::Result<Vec<_>>>()?;
            Ok::<_, rusqlite::Error>(items)
        });

        // Longer timeout than the point lookups: LIKE filters can scan.
        Ok(
            tokio::time::timeout(std::time::Duration::from_secs(10), fut)
                .await
                .map_err(|_| PinakesError::Database("search_books timed out".into()))?
                .map_err(|e: tokio::task::JoinError| PinakesError::Database(e.to_string()))??,
        )
    }
|
||||
}
|
||||
|
||||
// Needed for `query_row(...).optional()`
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue