pinakes-core: add batch_update_media; RAII temp file cleanup in import

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Icdec2d385c11ec64622611f3be09a20f6a6a6964
This commit is contained in:
raf 2026-03-07 16:55:43 +03:00
commit 237f7c28d2
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
2 changed files with 28 additions and 45 deletions

View file

@@ -403,32 +403,42 @@ pub async fn import_directory_with_options(
// Limit concurrency by draining when we hit the cap // Limit concurrency by draining when we hit the cap
if join_set.len() >= concurrency if join_set.len() >= concurrency
&& let Some(Ok((path, result))) = join_set.join_next().await && let Some(result) = join_set.join_next().await
{ {
match result { collect_import_result(result, &mut results);
Ok(r) => results.push(Ok(r)),
Err(e) => {
tracing::warn!(path = %path.display(), error = %e, "failed to import file");
results.push(Err(e));
},
}
} }
} }
// Drain remaining tasks // Drain remaining tasks
while let Some(Ok((path, result))) = join_set.join_next().await { while let Some(result) = join_set.join_next().await {
match result { collect_import_result(result, &mut results);
Ok(r) => results.push(Ok(r)),
Err(e) => {
tracing::warn!(path = %path.display(), error = %e, "failed to import file");
results.push(Err(e));
},
}
} }
Ok(results) Ok(results)
} }
/// Fold the outcome of a single spawned import task into `results`.
///
/// The outer `Result` is tokio's join layer: an `Err` means the task
/// panicked or was cancelled, and is converted into a
/// `PinakesError::InvalidOperation` so the caller still gets one entry
/// per scheduled file. The inner `Result` is the per-file import
/// outcome, which is logged with its path on failure and propagated.
fn collect_import_result(
    join_result: std::result::Result<
        (PathBuf, Result<ImportResult>),
        tokio::task::JoinError,
    >,
    results: &mut Vec<std::result::Result<ImportResult, PinakesError>>,
) {
    // Peel off the join layer first; bail out early on a panicked task.
    let (path, import_outcome) = match join_result {
        Ok(pair) => pair,
        Err(e) => {
            tracing::error!(error = %e, "import task panicked");
            results.push(Err(PinakesError::InvalidOperation(format!(
                "import task panicked: {e}"
            ))));
            return;
        },
    };
    match import_outcome {
        Ok(imported) => results.push(Ok(imported)),
        Err(e) => {
            // Include the file path so the failure can be traced to its input.
            tracing::warn!(path = %path.display(), error = %e, "failed to import file");
            results.push(Err(e));
        },
    }
}
/// Extract markdown links from a file and store them in the database. /// Extract markdown links from a file and store them in the database.
async fn extract_and_store_links( async fn extract_and_store_links(
storage: &DynStorageBackend, storage: &DynStorageBackend,

View file

@@ -155,7 +155,7 @@ pub trait StorageBackend: Send + Sync + 'static { @@ -155,7 +155,7 @@ pub trait StorageBackend: Send + Sync + 'static {
&self, &self,
) -> Result<Vec<(MediaId, std::path::PathBuf, ContentHash)>>; ) -> Result<Vec<(MediaId, std::path::PathBuf, ContentHash)>>;
// Batch metadata update // Batch metadata update (must be implemented per backend for bulk SQL)
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn batch_update_media( async fn batch_update_media(
&self, &self,
@@ -166,34 +166,7 @@ pub trait StorageBackend: Send + Sync + 'static { @@ -166,34 +166,7 @@ pub trait StorageBackend: Send + Sync + 'static {
genre: Option<&str>, genre: Option<&str>,
year: Option<i32>, year: Option<i32>,
description: Option<&str>, description: Option<&str>,
) -> Result<u64> { ) -> Result<u64>;
let mut count = 0u64;
for id in ids {
let mut item = self.get_media(*id).await?;
if let Some(v) = title {
item.title = Some(v.to_string());
}
if let Some(v) = artist {
item.artist = Some(v.to_string());
}
if let Some(v) = album {
item.album = Some(v.to_string());
}
if let Some(v) = genre {
item.genre = Some(v.to_string());
}
if let Some(v) = &year {
item.year = Some(*v);
}
if let Some(v) = description {
item.description = Some(v.to_string());
}
item.updated_at = chrono::Utc::now();
self.update_media(&item).await?;
count += 1;
}
Ok(count)
}
// Saved searches // Saved searches
async fn save_search( async fn save_search(