initial commit

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I4a6b498153eccd5407510dd541b7f4816a6a6964
This commit is contained in:
raf 2026-01-30 22:05:46 +03:00
commit 6a73d11c4b
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
124 changed files with 34856 additions and 0 deletions

View file

@ -0,0 +1,448 @@
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Result;
use clap::Parser;
use tokio::sync::RwLock;
use tracing::info;
use tracing_subscriber::EnvFilter;
use pinakes_core::config::Config;
use pinakes_core::storage::StorageBackend;
use pinakes_server::app;
use pinakes_server::state::AppState;
/// Pinakes media cataloging server
// Command-line interface parsed by clap's derive macro. The `///` doc
// comments on the struct and its fields double as the `--help` text, so
// they are user-facing strings — do not edit them casually.
#[derive(Parser)]
#[command(name = "pinakes-server", version, about)]
struct Cli {
    /// Path to configuration file
    // Also settable via the PINAKES_CONFIG environment variable; when absent,
    // resolve_config_path falls back to ./pinakes.toml, then the XDG default.
    #[arg(short, long, env = "PINAKES_CONFIG")]
    config: Option<PathBuf>,
    /// Override listen host
    // Takes precedence over the host in the config file when provided.
    #[arg(long)]
    host: Option<String>,
    /// Override listen port
    // Takes precedence over the port in the config file when provided.
    #[arg(short, long)]
    port: Option<u16>,
    /// Set log level (trace, debug, info, warn, error)
    // Fed into tracing's EnvFilter; an unparsable value falls back to "info".
    #[arg(long, default_value = "info")]
    log_level: String,
    /// Log output format (compact, full, pretty, json)
    // Any unrecognized value is treated as "compact" (see the match in main).
    #[arg(long, default_value = "compact")]
    log_format: String,
    /// Run database migrations only, then exit
    // Useful for deployment pipelines that migrate before rolling the server.
    #[arg(long)]
    migrate_only: bool,
}
/// Determine which configuration file path to use.
///
/// Precedence: an explicitly supplied path, then `pinakes.toml` in the
/// current working directory, then the XDG default location.
fn resolve_config_path(explicit: Option<&std::path::Path>) -> PathBuf {
    match explicit {
        // An explicit path (CLI flag or env var) always wins.
        Some(path) => path.to_path_buf(),
        None => {
            // Prefer a config file sitting in the invocation directory.
            let cwd_candidate = PathBuf::from("pinakes.toml");
            if cwd_candidate.exists() {
                cwd_candidate
            } else {
                // Fall back to the XDG default location.
                Config::default_config_path()
            }
        }
    }
}
#[tokio::main]
async fn main() -> Result<()> {
    let cli = Cli::parse();
    // Initialize logging. The CLI log level seeds the env filter; an invalid
    // level string falls back to "info" rather than aborting startup.
    let env_filter = EnvFilter::try_new(&cli.log_level).unwrap_or_else(|_| EnvFilter::new("info"));
    // Select the subscriber output format; any unrecognized value means "compact".
    match cli.log_format.as_str() {
        "json" => {
            tracing_subscriber::fmt()
                .with_env_filter(env_filter)
                .json()
                .init();
        }
        "pretty" => {
            tracing_subscriber::fmt()
                .with_env_filter(env_filter)
                .pretty()
                .init();
        }
        "full" => {
            tracing_subscriber::fmt().with_env_filter(env_filter).init();
        }
        _ => {
            tracing_subscriber::fmt()
                .with_env_filter(env_filter)
                .compact()
                .init();
        }
    }
    // Resolve and load configuration, create any missing directories, and
    // validate before applying CLI overrides.
    let config_path = resolve_config_path(cli.config.as_deref());
    info!(path = %config_path.display(), "loading configuration");
    let mut config = Config::load_or_default(&config_path)?;
    config.ensure_dirs()?;
    config
        .validate()
        .map_err(|e| anyhow::anyhow!("invalid configuration: {e}"))?;
    // Apply CLI overrides (flags win over the config file).
    if let Some(host) = cli.host {
        config.server.host = host;
    }
    if let Some(port) = cli.port {
        config.server.port = port;
    }
    // Storage backend initialization. The selected backend must have its
    // matching config section present; migrations run before anything else
    // touches the database.
    let storage: pinakes_core::storage::DynStorageBackend = match config.storage.backend {
        pinakes_core::config::StorageBackendType::Sqlite => {
            let sqlite_config = config.storage.sqlite.as_ref().ok_or_else(|| {
                anyhow::anyhow!(
                    "sqlite storage selected but [storage.sqlite] config section missing"
                )
            })?;
            info!(path = %sqlite_config.path.display(), "initializing sqlite storage");
            let backend = pinakes_core::storage::sqlite::SqliteBackend::new(&sqlite_config.path)?;
            backend.run_migrations().await?;
            Arc::new(backend)
        }
        pinakes_core::config::StorageBackendType::Postgres => {
            let pg_config = config.storage.postgres.as_ref().ok_or_else(|| {
                anyhow::anyhow!(
                    "postgres storage selected but [storage.postgres] config section missing"
                )
            })?;
            info!(host = %pg_config.host, port = pg_config.port, database = %pg_config.database, "initializing postgres storage");
            let backend = pinakes_core::storage::postgres::PostgresBackend::new(pg_config).await?;
            backend.run_migrations().await?;
            Arc::new(backend)
        }
    };
    // --migrate-only: stop once migrations have been applied.
    if cli.migrate_only {
        info!("migrations complete, exiting");
        return Ok(());
    }
    // Register root directories with storage; missing paths are skipped with
    // a warning rather than failing startup.
    for root in &config.directories.roots {
        if root.exists() {
            storage.add_root_dir(root.clone()).await?;
            info!(path = %root.display(), "registered root directory");
        } else {
            tracing::warn!(path = %root.display(), "root directory does not exist, skipping");
        }
    }
    // Start filesystem watcher if configured. It runs as a detached task and
    // only logs on failure.
    if config.scanning.watch {
        let watch_storage = storage.clone();
        let watch_dirs = config.directories.roots.clone();
        let watch_ignore = config.scanning.ignore_patterns.clone();
        tokio::spawn(async move {
            if let Err(e) =
                pinakes_core::scan::watch_and_import(watch_storage, watch_dirs, watch_ignore).await
            {
                tracing::error!(error = %e, "filesystem watcher failed");
            }
        });
        info!("filesystem watcher started");
    }
    let addr = format!("{}:{}", config.server.host, config.server.port);
    // Initialize the job queue. The executor closure is invoked per dequeued
    // job and spawns a task that runs it, reporting the outcome back through
    // JobQueue::complete / JobQueue::fail.
    let job_storage = storage.clone();
    let job_config = config.clone();
    let job_queue = pinakes_core::jobs::JobQueue::new(
        config.jobs.worker_count,
        move |job_id, kind, cancel, jobs| {
            let storage = job_storage.clone();
            let config = job_config.clone();
            tokio::spawn(async move {
                use pinakes_core::jobs::{JobKind, JobQueue};
                let result = match kind {
                    // Scan a single path, or every registered root when no
                    // path is given (aggregating the per-root statuses).
                    JobKind::Scan { path } => {
                        let ignore = config.scanning.ignore_patterns.clone();
                        let res = if let Some(p) = path {
                            pinakes_core::scan::scan_directory(&storage, &p, &ignore).await
                        } else {
                            pinakes_core::scan::scan_all_roots(&storage, &ignore)
                                .await
                                .map(|statuses| {
                                    // Merge the per-root results into one status.
                                    let total_found: usize =
                                        statuses.iter().map(|s| s.files_found).sum();
                                    let total_processed: usize =
                                        statuses.iter().map(|s| s.files_processed).sum();
                                    let all_errors: Vec<String> =
                                        statuses.into_iter().flat_map(|s| s.errors).collect();
                                    pinakes_core::scan::ScanStatus {
                                        scanning: false,
                                        files_found: total_found,
                                        files_processed: total_processed,
                                        errors: all_errors,
                                    }
                                })
                        };
                        match res {
                            Ok(status) => {
                                JobQueue::complete(
                                    &jobs,
                                    job_id,
                                    serde_json::json!({
                                        "files_found": status.files_found,
                                        "files_processed": status.files_processed,
                                        "errors": status.errors,
                                    }),
                                )
                                .await;
                            }
                            Err(e) => {
                                JobQueue::fail(&jobs, job_id, e.to_string()).await;
                            }
                        }
                    }
                    // Generate thumbnails for the given media ids, honoring
                    // cooperative cancellation and reporting progress per item.
                    JobKind::GenerateThumbnails { media_ids } => {
                        let thumb_dir = pinakes_core::thumbnail::default_thumbnail_dir();
                        let thumb_config = config.thumbnails.clone();
                        let total = media_ids.len();
                        let mut generated = 0usize;
                        let mut errors = Vec::new();
                        for (i, mid) in media_ids.iter().enumerate() {
                            // Stop early if the job was cancelled; partial
                            // results are still reported below.
                            if cancel.is_cancelled() {
                                break;
                            }
                            JobQueue::update_progress(
                                &jobs,
                                job_id,
                                i as f32 / total as f32,
                                format!("{}/{}", i, total),
                            )
                            .await;
                            match storage.get_media(*mid).await {
                                Ok(item) => {
                                    let source = item.path.clone();
                                    let mt = item.media_type;
                                    let id = item.id;
                                    let td = thumb_dir.clone();
                                    let tc = thumb_config.clone();
                                    // Generation is blocking work, so run it
                                    // off the async runtime's worker threads.
                                    let res = tokio::task::spawn_blocking(move || {
                                        pinakes_core::thumbnail::generate_thumbnail_with_config(
                                            id, &source, mt, &td, &tc,
                                        )
                                    })
                                    .await;
                                    match res {
                                        Ok(Ok(Some(path))) => {
                                            let mut updated = item;
                                            updated.thumbnail_path = Some(path);
                                            // Best-effort update: a failed DB
                                            // write is deliberately ignored here.
                                            let _ = storage.update_media(&updated).await;
                                            generated += 1;
                                        }
                                        // None: no thumbnail was produced; not an error.
                                        Ok(Ok(None)) => {}
                                        Ok(Err(e)) => errors.push(format!("{}: {}", mid, e)),
                                        // Err from spawn_blocking = task panicked/cancelled.
                                        Err(e) => errors.push(format!("{}: {}", mid, e)),
                                    }
                                }
                                Err(e) => errors.push(format!("{}: {}", mid, e)),
                            }
                        }
                        JobQueue::complete(
                            &jobs,
                            job_id,
                            serde_json::json!({
                                "generated": generated, "errors": errors
                            }),
                        )
                        .await;
                    }
                    // Verify stored media integrity; an empty id list means "all".
                    JobKind::VerifyIntegrity { media_ids } => {
                        let ids = if media_ids.is_empty() {
                            None
                        } else {
                            Some(media_ids.as_slice())
                        };
                        match pinakes_core::integrity::verify_integrity(&storage, ids).await {
                            Ok(report) => {
                                JobQueue::complete(
                                    &jobs,
                                    job_id,
                                    serde_json::to_value(&report).unwrap_or_default(),
                                )
                                .await;
                            }
                            Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
                        }
                    }
                    JobKind::OrphanDetection => {
                        match pinakes_core::integrity::detect_orphans(&storage).await {
                            Ok(report) => {
                                JobQueue::complete(
                                    &jobs,
                                    job_id,
                                    serde_json::to_value(&report).unwrap_or_default(),
                                )
                                .await;
                            }
                            Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
                        }
                    }
                    JobKind::CleanupThumbnails => {
                        let thumb_dir = pinakes_core::thumbnail::default_thumbnail_dir();
                        match pinakes_core::integrity::cleanup_orphaned_thumbnails(
                            &storage, &thumb_dir,
                        )
                        .await
                        {
                            Ok(removed) => {
                                JobQueue::complete(
                                    &jobs,
                                    job_id,
                                    serde_json::json!({ "removed": removed }),
                                )
                                .await;
                            }
                            Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
                        }
                    }
                    JobKind::Export {
                        format,
                        destination,
                    } => {
                        match pinakes_core::export::export_library(&storage, &format, &destination)
                            .await
                        {
                            Ok(result) => {
                                JobQueue::complete(
                                    &jobs,
                                    job_id,
                                    serde_json::to_value(&result).unwrap_or_default(),
                                )
                                .await;
                            }
                            Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
                        }
                    }
                };
                // Every arm reports its outcome via complete/fail, so the
                // match's value carries no information.
                let _ = result;
                drop(cancel);
            })
        },
    );
    // Initialize cache layer shared by request handlers.
    let cache = std::sync::Arc::new(pinakes_core::cache::CacheLayer::new(
        config.jobs.cache_ttl_secs,
    ));
    // Initialize scheduler with cancellation support. The same shutdown token
    // is shared with the session-cleanup task below and cancelled after the
    // HTTP server drains.
    let shutdown_token = tokio_util::sync::CancellationToken::new();
    let config_arc = Arc::new(RwLock::new(config));
    let scheduler = pinakes_core::scheduler::TaskScheduler::new(
        job_queue.clone(),
        shutdown_token.clone(),
        config_arc.clone(),
        Some(config_path.clone()),
    );
    let scheduler = Arc::new(scheduler);
    // Restore saved scheduler state from config before the loop starts.
    scheduler.restore_state().await;
    // Spawn scheduler background loop (detached; stopped via the shutdown token).
    {
        let scheduler = scheduler.clone();
        tokio::spawn(async move {
            scheduler.run().await;
        });
    }
    // Shared application state handed to every request handler.
    let state = AppState {
        storage: storage.clone(),
        config: config_arc,
        config_path: Some(config_path),
        scan_progress: pinakes_core::scan::ScanProgress::new(),
        sessions: Arc::new(RwLock::new(std::collections::HashMap::new())),
        job_queue,
        cache,
        scheduler,
    };
    // Periodic session cleanup (every 15 minutes), stopped via the shutdown token.
    {
        let sessions = state.sessions.clone();
        let cancel = shutdown_token.clone();
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(std::time::Duration::from_secs(15 * 60));
            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        pinakes_server::state::cleanup_expired_sessions(&sessions).await;
                    }
                    _ = cancel.cancelled() => {
                        break;
                    }
                }
            }
        });
    }
    let router = app::create_router(state);
    info!(addr = %addr, "server listening");
    let listener = tokio::net::TcpListener::bind(&addr).await?;
    // Serve until a shutdown signal arrives; connect info is exposed so
    // handlers can read the peer SocketAddr.
    axum::serve(
        listener,
        router.into_make_service_with_connect_info::<std::net::SocketAddr>(),
    )
    .with_graceful_shutdown(shutdown_signal())
    .await?;
    // Tell background tasks (scheduler, session cleanup) to stop after the
    // server has drained its connections.
    shutdown_token.cancel();
    info!("server shut down");
    Ok(())
}
/// Resolves once a shutdown signal is received: Ctrl+C on all platforms,
/// or additionally SIGTERM on Unix.
///
/// If a handler cannot be installed, the corresponding branch logs a warning
/// and then pends forever, leaving the other branch to trigger shutdown.
async fn shutdown_signal() {
    let ctrl_c = async {
        if let Err(e) = tokio::signal::ctrl_c().await {
            tracing::warn!(error = %e, "failed to install Ctrl+C handler");
            // Never resolve this branch; rely on the other signal source.
            std::future::pending::<()>().await;
        }
    };
    #[cfg(unix)]
    let terminate = async {
        use tokio::signal::unix::{signal, SignalKind};
        match signal(SignalKind::terminate()) {
            Ok(mut sigterm) => {
                sigterm.recv().await;
            }
            Err(e) => {
                tracing::warn!(error = %e, "failed to install SIGTERM handler");
                // Never resolve this branch; rely on Ctrl+C instead.
                std::future::pending::<()>().await;
            }
        }
    };
    // Non-Unix platforms have no SIGTERM; this branch never completes.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();
    // Whichever signal arrives first wins.
    tokio::select! {
        _ = ctrl_c => info!("received Ctrl+C, shutting down"),
        _ = terminate => info!("received SIGTERM, shutting down"),
    }
}