various: inherit workspace lints in all crates; eliminate unwrap() throughout

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Id8de9d65139ec4cf4cdeaee14c8c95b06a6a6964
This commit is contained in:
raf 2026-03-07 16:55:43 +03:00
commit b8ff35acea
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
12 changed files with 514 additions and 239 deletions

View file

@ -46,5 +46,8 @@ image_hasher = { workspace = true }
pinakes-plugin-api.workspace = true
wasmtime.workspace = true
[lints]
workspace = true
[dev-dependencies]
tempfile = "3.25.0"

View file

@ -132,7 +132,9 @@ pub fn parse_author_file_as(name: &str) -> String {
1 => parts[0].to_string(),
_ => {
// Last part is surname, rest is given names
let surname = parts.last().unwrap();
let Some(surname) = parts.last() else {
return String::new();
};
let given_names = parts[..parts.len() - 1].join(" ");
format!("{}, {}", surname, given_names)
},

View file

@ -117,7 +117,10 @@ impl MetadataEnricher for MusicBrainzEnricher {
return Ok(None);
}
let recording = &recordings.unwrap()[0];
let Some(recordings) = recordings else {
return Ok(None);
};
let recording = &recordings[0];
let external_id = recording
.get("id")
.and_then(|id| id.as_str())

View file

@ -89,7 +89,10 @@ impl MetadataEnricher for TmdbEnricher {
return Ok(None);
}
let movie = &results.unwrap()[0];
let Some(results) = results else {
return Ok(None);
};
let movie = &results[0];
let external_id = match movie.get("id").and_then(|id| id.as_i64()) {
Some(id) => id.to_string(),
None => return Ok(None),

View file

@ -77,17 +77,22 @@ pub fn detect_events(
return Ok(Vec::new());
}
// Sort by date_taken
items.sort_by_key(|a| a.date_taken.unwrap());
// Sort by date_taken (None < Some, but all are Some after retain)
items.sort_by_key(|a| a.date_taken);
let mut events: Vec<DetectedEvent> = Vec::new();
let Some(first_date) = items[0].date_taken else {
return Ok(Vec::new());
};
let mut current_event_items: Vec<MediaId> = vec![items[0].id];
let mut current_start_time = items[0].date_taken.unwrap();
let mut current_last_time = items[0].date_taken.unwrap();
let mut current_start_time = first_date;
let mut current_last_time = first_date;
let mut current_location = items[0].latitude.zip(items[0].longitude);
for item in items.iter().skip(1) {
let item_time = item.date_taken.unwrap();
let Some(item_time) = item.date_taken else {
continue;
};
let time_gap = (item_time - current_last_time).num_seconds();
// Check time gap
@ -180,15 +185,20 @@ pub fn detect_bursts(
return Ok(Vec::new());
}
// Sort by date_taken
items.sort_by_key(|a| a.date_taken.unwrap());
// Sort by date_taken (None < Some, but all are Some after retain)
items.sort_by_key(|a| a.date_taken);
let mut bursts: Vec<Vec<MediaId>> = Vec::new();
let Some(first_date) = items[0].date_taken else {
return Ok(Vec::new());
};
let mut current_burst: Vec<MediaId> = vec![items[0].id];
let mut last_time = items[0].date_taken.unwrap();
let mut last_time = first_date;
for item in items.iter().skip(1) {
let item_time = item.date_taken.unwrap();
let Some(item_time) = item.date_taken else {
continue;
};
let gap = (item_time - last_time).num_seconds();
if gap <= max_gap_secs {

View file

@ -8,13 +8,26 @@
//! - Link resolution strategies
//! - Context extraction for backlink previews
use std::path::Path;
use std::{path::Path, sync::LazyLock};
use regex::Regex;
use uuid::Uuid;
use crate::model::{LinkType, MarkdownLink, MediaId};
// Compile regexes once at startup to avoid recompilation on every call
static WIKILINK_RE: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").expect("valid wikilink regex")
});
static EMBED_RE: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"!\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").expect("valid embed regex")
});
static MARKDOWN_LINK_RE: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"\[([^\]]+)\]\(([^)]+)\)").expect("valid markdown link regex")
});
/// Configuration for context extraction around links
const CONTEXT_CHARS_BEFORE: usize = 50;
const CONTEXT_CHARS_AFTER: usize = 50;
@ -50,13 +63,13 @@ fn extract_wikilinks(
source_media_id: MediaId,
content: &str,
) -> Vec<MarkdownLink> {
// Match [[...]] - we'll manually filter out embeds that are preceded by !
let re = Regex::new(r"\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").unwrap();
let mut links = Vec::new();
for (line_num, line) in content.lines().enumerate() {
for cap in re.captures_iter(line) {
let full_match = cap.get(0).unwrap();
for cap in WIKILINK_RE.captures_iter(line) {
let Some(full_match) = cap.get(0) else {
continue;
};
let match_start = full_match.start();
// Check if preceded by ! (which would make it an embed, not a wikilink)
@ -67,7 +80,10 @@ fn extract_wikilinks(
}
}
let target = cap.get(1).unwrap().as_str().trim();
let Some(target_match) = cap.get(1) else {
continue;
};
let target = target_match.as_str().trim();
let display_text = cap.get(2).map(|m| m.as_str().trim().to_string());
let context = extract_context(
@ -100,13 +116,17 @@ fn extract_embeds(
source_media_id: MediaId,
content: &str,
) -> Vec<MarkdownLink> {
let re = Regex::new(r"!\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").unwrap();
let mut links = Vec::new();
for (line_num, line) in content.lines().enumerate() {
for cap in re.captures_iter(line) {
let full_match = cap.get(0).unwrap();
let target = cap.get(1).unwrap().as_str().trim();
for cap in EMBED_RE.captures_iter(line) {
let Some(full_match) = cap.get(0) else {
continue;
};
let Some(target_match) = cap.get(1) else {
continue;
};
let target = target_match.as_str().trim();
let display_text = cap.get(2).map(|m| m.as_str().trim().to_string());
let context = extract_context(
@ -139,13 +159,13 @@ fn extract_markdown_links(
source_media_id: MediaId,
content: &str,
) -> Vec<MarkdownLink> {
// Match [text](path) where path doesn't start with http:// or https://
let re = Regex::new(r"\[([^\]]+)\]\(([^)]+)\)").unwrap();
let mut links = Vec::new();
for (line_num, line) in content.lines().enumerate() {
for cap in re.captures_iter(line) {
let full_match = cap.get(0).unwrap();
for cap in MARKDOWN_LINK_RE.captures_iter(line) {
let Some(full_match) = cap.get(0) else {
continue;
};
let match_start = full_match.start();
// Skip markdown images: ![alt](image.png)
@ -155,8 +175,14 @@ fn extract_markdown_links(
continue;
}
let text = cap.get(1).unwrap().as_str().trim();
let path = cap.get(2).unwrap().as_str().trim();
let Some(text_match) = cap.get(1) else {
continue;
};
let Some(path_match) = cap.get(2) else {
continue;
};
let text = text_match.as_str().trim();
let path = path_match.as_str().trim();
// Skip external links
if path.starts_with("http://")

View file

@ -20,6 +20,15 @@ pub struct PostgresBackend {
pool: Pool,
}
/// Escape special LIKE pattern characters (`%`, `_`, `\`) in user input
/// to prevent wildcard injection.
fn escape_like_pattern(input: &str) -> String {
input
.replace('\\', "\\\\")
.replace('%', "\\%")
.replace('_', "\\_")
}
impl PostgresBackend {
pub async fn new(config: &PostgresConfig) -> Result<Self> {
let mut pool_config = PoolConfig::new();
@ -335,7 +344,7 @@ fn build_search_inner(
params.push(Box::new(text.clone()));
params.push(Box::new(prefix_query));
params.push(Box::new(format!("%{}%", text)));
params.push(Box::new(format!("%{}%", escape_like_pattern(&text))));
params.push(Box::new(text.clone()));
params.push(Box::new(text.clone()));
params.push(Box::new(text.clone()));
@ -377,7 +386,7 @@ fn build_search_inner(
params.push(Box::new(term.clone()));
params.push(Box::new(term.clone()));
params.push(Box::new(term.clone()));
params.push(Box::new(format!("%{}%", term)));
params.push(Box::new(format!("%{}%", escape_like_pattern(&term))));
Ok(format!(
"(similarity(COALESCE(title, ''), ${idx_title}) > 0.3 OR \
similarity(COALESCE(artist, ''), ${idx_artist}) > 0.3 OR \
@ -1086,6 +1095,88 @@ impl StorageBackend for PostgresBackend {
Ok(rows)
}
async fn batch_update_media(
&self,
ids: &[MediaId],
title: Option<&str>,
artist: Option<&str>,
album: Option<&str>,
genre: Option<&str>,
year: Option<i32>,
description: Option<&str>,
) -> Result<u64> {
if ids.is_empty() {
return Ok(0);
}
// Build SET clause dynamically from provided fields
let mut set_parts = Vec::new();
let mut params: Vec<Box<dyn tokio_postgres::types::ToSql + Sync + Send>> =
Vec::new();
let mut idx = 1;
if let Some(v) = title {
set_parts.push(format!("title = ${idx}"));
params.push(Box::new(v.to_string()));
idx += 1;
}
if let Some(v) = artist {
set_parts.push(format!("artist = ${idx}"));
params.push(Box::new(v.to_string()));
idx += 1;
}
if let Some(v) = album {
set_parts.push(format!("album = ${idx}"));
params.push(Box::new(v.to_string()));
idx += 1;
}
if let Some(v) = genre {
set_parts.push(format!("genre = ${idx}"));
params.push(Box::new(v.to_string()));
idx += 1;
}
if let Some(v) = year {
set_parts.push(format!("year = ${idx}"));
params.push(Box::new(v));
idx += 1;
}
if let Some(v) = description {
set_parts.push(format!("description = ${idx}"));
params.push(Box::new(v.to_string()));
idx += 1;
}
// Always update updated_at
let now = chrono::Utc::now();
set_parts.push(format!("updated_at = ${idx}"));
params.push(Box::new(now));
idx += 1;
if set_parts.len() == 1 {
return Ok(0);
}
let uuids: Vec<Uuid> = ids.iter().map(|id| id.0).collect();
let sql = format!(
"UPDATE media_items SET {} WHERE id = ANY(${idx})",
set_parts.join(", ")
);
params.push(Box::new(uuids));
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let param_refs: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = params
.iter()
.map(|p| p.as_ref() as &(dyn tokio_postgres::types::ToSql + Sync))
.collect();
let rows = client.execute(&sql, &param_refs).await?;
Ok(rows)
}
// Tags
async fn create_tag(
&self,
@ -4186,11 +4277,9 @@ impl StorageBackend for PostgresBackend {
)
.await?;
if row.is_none() {
let Some(row) = row else {
return Ok(None);
}
let row = row.unwrap();
};
// Get authors
let author_rows = client
@ -4552,9 +4641,9 @@ impl StorageBackend for PostgresBackend {
let rows = if let (Some(i), Some(a), Some(s), Some(p), Some(l)) =
(isbn, author, series, publisher, language)
{
let author_pattern = format!("%{}%", a);
let series_pattern = format!("%{}%", s);
let publisher_pattern = format!("%{}%", p);
let author_pattern = format!("%{}%", escape_like_pattern(a));
let series_pattern = format!("%{}%", escape_like_pattern(s));
let publisher_pattern = format!("%{}%", escape_like_pattern(p));
client
.query(
"SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, \

File diff suppressed because it is too large Load diff

View file

@ -22,6 +22,9 @@ mime_guess = { workspace = true }
# WASM bridge types
wit-bindgen = { workspace = true, optional = true }
[lints]
workspace = true
[features]
default = []
wasm = ["wit-bindgen"]

View file

@ -32,6 +32,9 @@ rand = { workspace = true }
percent-encoding = { workspace = true }
http = { workspace = true }
[lints]
workspace = true
[dev-dependencies]
http-body-util = "0.1.3"
tempfile = "3.25.0"

View file

@ -18,3 +18,6 @@ tracing-subscriber = { workspace = true }
reqwest = { workspace = true }
ratatui = { workspace = true }
crossterm = { workspace = true }
[lints]
workspace = true

View file

@ -26,6 +26,9 @@ dioxus-free-icons = { workspace = true }
gloo-timers = { workspace = true }
rand = { workspace = true }
[lints]
workspace = true
[features]
default = ["web"]
web = ["dioxus/web"]