chore: format with updated rustfmt and taplo rules

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ie9ef5fc421fa20071946cf1073f7920c6a6a6964
This commit is contained in:
raf 2026-02-02 02:23:50 +03:00
commit c306383d27
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
72 changed files with 11217 additions and 10487 deletions

View file

@ -1,301 +1,306 @@
use std::{path::Path, time::Duration};

use fc_common::{CiError, error::Result};
use tokio::io::{AsyncBufReadExt, BufReader};
const MAX_LOG_SIZE: usize = 100 * 1024 * 1024; // 100MB
/// Run a build on a remote machine via `nix build --store ssh://...`.
#[tracing::instrument(skip(work_dir, live_log_path), fields(drv_path, store_uri))]
#[tracing::instrument(
skip(work_dir, live_log_path),
fields(drv_path, store_uri)
)]
pub async fn run_nix_build_remote(
drv_path: &str,
work_dir: &Path,
timeout: Duration,
store_uri: &str,
ssh_key_file: Option<&str>,
live_log_path: Option<&Path>,
drv_path: &str,
work_dir: &Path,
timeout: Duration,
store_uri: &str,
ssh_key_file: Option<&str>,
live_log_path: Option<&Path>,
) -> Result<BuildResult> {
let result = tokio::time::timeout(timeout, async {
let mut cmd = tokio::process::Command::new("nix");
cmd.args([
"build",
"--no-link",
"--print-out-paths",
"--log-format",
"internal-json",
"--option",
"sandbox",
"true",
"--max-build-log-size",
"104857600",
"--store",
store_uri,
drv_path,
])
.current_dir(work_dir)
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped());
let result = tokio::time::timeout(timeout, async {
let mut cmd = tokio::process::Command::new("nix");
cmd
.args([
"build",
"--no-link",
"--print-out-paths",
"--log-format",
"internal-json",
"--option",
"sandbox",
"true",
"--max-build-log-size",
"104857600",
"--store",
store_uri,
drv_path,
])
.current_dir(work_dir)
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped());
if let Some(key_file) = ssh_key_file {
cmd.env(
"NIX_SSHOPTS",
format!("-i {key_file} -o StrictHostKeyChecking=accept-new"),
);
}
let mut child = cmd
.spawn()
.map_err(|e| CiError::Build(format!("Failed to run remote nix build: {e}")))?;
let stdout_handle = child.stdout.take();
let stderr_handle = child.stderr.take();
let stdout_task = tokio::spawn(async move {
let mut buf = String::new();
if let Some(stdout) = stdout_handle {
let mut reader = BufReader::new(stdout);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
buf.push_str(&line);
line.clear();
}
}
buf
});
let live_log_path_owned = live_log_path.map(|p| p.to_path_buf());
let stderr_task = tokio::spawn(async move {
let mut buf = String::new();
let steps: Vec<SubStep> = Vec::new();
let mut log_file = if let Some(ref path) = live_log_path_owned {
tokio::fs::File::create(path).await.ok()
} else {
None
};
if let Some(stderr) = stderr_handle {
let mut reader = BufReader::new(stderr);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
if let Some(ref mut f) = log_file {
use tokio::io::AsyncWriteExt;
let _ = f.write_all(line.as_bytes()).await;
let _ = f.flush().await;
}
if buf.len() < MAX_LOG_SIZE {
buf.push_str(&line);
}
line.clear();
}
}
(buf, steps)
});
let stdout_buf = stdout_task.await.unwrap_or_default();
let (stderr_buf, sub_steps) = stderr_task.await.unwrap_or_default();
let status = child
.wait()
.await
.map_err(|e| CiError::Build(format!("Failed to wait for remote nix build: {e}")))?;
let output_paths: Vec<String> = stdout_buf
.lines()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
Ok::<_, CiError>(BuildResult {
success: status.success(),
stdout: stdout_buf,
stderr: stderr_buf,
output_paths,
sub_steps,
})
})
.await;
match result {
Ok(inner) => inner,
Err(_) => Err(CiError::Timeout(format!(
"Remote build timed out after {timeout:?}"
))),
if let Some(key_file) = ssh_key_file {
cmd.env(
"NIX_SSHOPTS",
format!("-i {key_file} -o StrictHostKeyChecking=accept-new"),
);
}
let mut child = cmd.spawn().map_err(|e| {
CiError::Build(format!("Failed to run remote nix build: {e}"))
})?;
let stdout_handle = child.stdout.take();
let stderr_handle = child.stderr.take();
let stdout_task = tokio::spawn(async move {
let mut buf = String::new();
if let Some(stdout) = stdout_handle {
let mut reader = BufReader::new(stdout);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
buf.push_str(&line);
line.clear();
}
}
buf
});
let live_log_path_owned = live_log_path.map(|p| p.to_path_buf());
let stderr_task = tokio::spawn(async move {
let mut buf = String::new();
let steps: Vec<SubStep> = Vec::new();
let mut log_file = if let Some(ref path) = live_log_path_owned {
tokio::fs::File::create(path).await.ok()
} else {
None
};
if let Some(stderr) = stderr_handle {
let mut reader = BufReader::new(stderr);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
if let Some(ref mut f) = log_file {
use tokio::io::AsyncWriteExt;
let _ = f.write_all(line.as_bytes()).await;
let _ = f.flush().await;
}
if buf.len() < MAX_LOG_SIZE {
buf.push_str(&line);
}
line.clear();
}
}
(buf, steps)
});
let stdout_buf = stdout_task.await.unwrap_or_default();
let (stderr_buf, sub_steps) = stderr_task.await.unwrap_or_default();
let status = child.wait().await.map_err(|e| {
CiError::Build(format!("Failed to wait for remote nix build: {e}"))
})?;
let output_paths: Vec<String> = stdout_buf
.lines()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
Ok::<_, CiError>(BuildResult {
success: status.success(),
stdout: stdout_buf,
stderr: stderr_buf,
output_paths,
sub_steps,
})
})
.await;
match result {
Ok(inner) => inner,
Err(_) => {
Err(CiError::Timeout(format!(
"Remote build timed out after {timeout:?}"
)))
},
}
}
pub struct BuildResult {
pub success: bool,
pub stdout: String,
pub stderr: String,
pub output_paths: Vec<String>,
pub sub_steps: Vec<SubStep>,
pub success: bool,
pub stdout: String,
pub stderr: String,
pub output_paths: Vec<String>,
pub sub_steps: Vec<SubStep>,
}
/// A sub-step parsed from nix's internal JSON log format.
pub struct SubStep {
pub drv_path: String,
pub completed_at: Option<chrono::DateTime<chrono::Utc>>,
pub success: bool,
pub drv_path: String,
pub completed_at: Option<chrono::DateTime<chrono::Utc>>,
pub success: bool,
}
/// Parse a single nix internal JSON log line (`@nix {...}`).
/// Returns `Some((action, drv_path))` if the line contains a derivation
/// action; only `"start"` and `"stop"` actions are reported, everything
/// else (and any unparseable line) yields `None`.
pub fn parse_nix_log_line(line: &str) -> Option<(&'static str, String)> {
    let json_str = line.strip_prefix("@nix ")?.trim();
    let parsed: serde_json::Value = serde_json::from_str(json_str).ok()?;
    let action = parsed.get("action")?.as_str()?;
    let drv = parsed.get("derivation")?.as_str()?.to_string();
    match action {
        "start" => Some(("start", drv)),
        "stop" => Some(("stop", drv)),
        _ => None,
    }
}
/// Run `nix build` for a derivation path.
/// If `live_log_path` is provided, build output is streamed to that file incrementally.
/// If `live_log_path` is provided, build output is streamed to that file
/// incrementally.
#[tracing::instrument(skip(work_dir, live_log_path), fields(drv_path))]
pub async fn run_nix_build(
drv_path: &str,
work_dir: &Path,
timeout: Duration,
live_log_path: Option<&Path>,
drv_path: &str,
work_dir: &Path,
timeout: Duration,
live_log_path: Option<&Path>,
) -> Result<BuildResult> {
let result = tokio::time::timeout(timeout, async {
let mut child = tokio::process::Command::new("nix")
.args([
"build",
"--no-link",
"--print-out-paths",
"--log-format",
"internal-json",
"--option",
"sandbox",
"true",
"--max-build-log-size",
"104857600",
drv_path,
])
.current_dir(work_dir)
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()
.map_err(|e| CiError::Build(format!("Failed to run nix build: {e}")))?;
let result = tokio::time::timeout(timeout, async {
let mut child = tokio::process::Command::new("nix")
.args([
"build",
"--no-link",
"--print-out-paths",
"--log-format",
"internal-json",
"--option",
"sandbox",
"true",
"--max-build-log-size",
"104857600",
drv_path,
])
.current_dir(work_dir)
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()
.map_err(|e| CiError::Build(format!("Failed to run nix build: {e}")))?;
let stdout_handle = child.stdout.take();
let stderr_handle = child.stderr.take();
let stdout_handle = child.stdout.take();
let stderr_handle = child.stderr.take();
// Read stdout (output paths)
let stdout_task = tokio::spawn(async move {
let mut buf = String::new();
if let Some(stdout) = stdout_handle {
let mut reader = BufReader::new(stdout);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
buf.push_str(&line);
line.clear();
// Read stdout (output paths)
let stdout_task = tokio::spawn(async move {
let mut buf = String::new();
if let Some(stdout) = stdout_handle {
let mut reader = BufReader::new(stdout);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
buf.push_str(&line);
line.clear();
}
}
buf
});
// Read stderr (logs + internal JSON)
let live_log_path_owned = live_log_path.map(|p| p.to_path_buf());
let stderr_task = tokio::spawn(async move {
let mut buf = String::new();
let mut steps: Vec<SubStep> = Vec::new();
let mut log_file = if let Some(ref path) = live_log_path_owned {
tokio::fs::File::create(path).await.ok()
} else {
None
};
if let Some(stderr) = stderr_handle {
let mut reader = BufReader::new(stderr);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
// Write to live log file if available
if let Some(ref mut f) = log_file {
use tokio::io::AsyncWriteExt;
let _ = f.write_all(line.as_bytes()).await;
let _ = f.flush().await;
}
// Parse nix internal JSON log lines
if line.starts_with("@nix ")
&& let Some(json_str) = line.strip_prefix("@nix ")
&& let Ok(parsed) =
serde_json::from_str::<serde_json::Value>(json_str.trim())
&& let Some(action) = parsed.get("action").and_then(|a| a.as_str())
{
match action {
"start" => {
if let Some(drv) =
parsed.get("derivation").and_then(|d| d.as_str())
{
steps.push(SubStep {
drv_path: drv.to_string(),
completed_at: None,
success: false,
});
}
}
buf
});
// Read stderr (logs + internal JSON)
let live_log_path_owned = live_log_path.map(|p| p.to_path_buf());
let stderr_task = tokio::spawn(async move {
let mut buf = String::new();
let mut steps: Vec<SubStep> = Vec::new();
let mut log_file = if let Some(ref path) = live_log_path_owned {
tokio::fs::File::create(path).await.ok()
} else {
None
};
if let Some(stderr) = stderr_handle {
let mut reader = BufReader::new(stderr);
let mut line = String::new();
while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
// Write to live log file if available
if let Some(ref mut f) = log_file {
use tokio::io::AsyncWriteExt;
let _ = f.write_all(line.as_bytes()).await;
let _ = f.flush().await;
}
// Parse nix internal JSON log lines
if line.starts_with("@nix ")
&& let Some(json_str) = line.strip_prefix("@nix ")
&& let Ok(parsed) =
serde_json::from_str::<serde_json::Value>(json_str.trim())
&& let Some(action) = parsed.get("action").and_then(|a| a.as_str())
{
match action {
"start" => {
if let Some(drv) =
parsed.get("derivation").and_then(|d| d.as_str())
{
steps.push(SubStep {
drv_path: drv.to_string(),
completed_at: None,
success: false,
});
}
}
"stop" => {
if let Some(drv) =
parsed.get("derivation").and_then(|d| d.as_str())
&& let Some(step) =
steps.iter_mut().rfind(|s| s.drv_path == drv)
{
step.completed_at = Some(chrono::Utc::now());
step.success = true;
}
}
_ => {}
}
}
if buf.len() < MAX_LOG_SIZE {
buf.push_str(&line);
}
line.clear();
},
"stop" => {
if let Some(drv) =
parsed.get("derivation").and_then(|d| d.as_str())
&& let Some(step) =
steps.iter_mut().rfind(|s| s.drv_path == drv)
{
step.completed_at = Some(chrono::Utc::now());
step.success = true;
}
},
_ => {},
}
(buf, steps)
});
}
let stdout_buf = stdout_task.await.unwrap_or_default();
let (stderr_buf, sub_steps) = stderr_task.await.unwrap_or_default();
if buf.len() < MAX_LOG_SIZE {
buf.push_str(&line);
}
line.clear();
}
}
(buf, steps)
});
let status = child
.wait()
.await
.map_err(|e| CiError::Build(format!("Failed to wait for nix build: {e}")))?;
let stdout_buf = stdout_task.await.unwrap_or_default();
let (stderr_buf, sub_steps) = stderr_task.await.unwrap_or_default();
let output_paths: Vec<String> = stdout_buf
.lines()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
let status = child.wait().await.map_err(|e| {
CiError::Build(format!("Failed to wait for nix build: {e}"))
})?;
Ok::<_, CiError>(BuildResult {
success: status.success(),
stdout: stdout_buf,
stderr: stderr_buf,
output_paths,
sub_steps,
})
let output_paths: Vec<String> = stdout_buf
.lines()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
Ok::<_, CiError>(BuildResult {
success: status.success(),
stdout: stdout_buf,
stderr: stderr_buf,
output_paths,
sub_steps,
})
.await;
})
.await;
match result {
Ok(inner) => inner,
Err(_) => Err(CiError::Timeout(format!(
"Build timed out after {timeout:?}"
))),
}
match result {
Ok(inner) => inner,
Err(_) => {
Err(CiError::Timeout(format!(
"Build timed out after {timeout:?}"
)))
},
}
}

View file

@ -1,162 +1,161 @@
use std::time::Duration;
use std::{sync::Arc, time::Duration};
use clap::Parser;
use fc_common::config::{Config, GcConfig};
use fc_common::database::Database;
use fc_common::gc_roots;
use std::sync::Arc;
use fc_common::{
config::{Config, GcConfig},
database::Database,
gc_roots,
};
use fc_queue_runner::worker::WorkerPool;
/// Command-line interface for the queue runner binary.
#[derive(Parser)]
#[command(name = "fc-queue-runner")]
#[command(about = "CI Queue Runner - Build dispatch and execution")]
struct Cli {
    /// Number of worker tasks; overrides the config value when given.
    #[arg(short, long)]
    workers: Option<usize>,
}
/// Queue-runner entry point: loads config, starts the worker pool, and runs
/// the dispatch loop, GC loop, and shutdown handler concurrently via
/// `tokio::select!`. On shutdown, in-flight builds are drained before the
/// database pool is closed.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    let config = Config::load()?;
    fc_common::init_tracing(&config.tracing);

    tracing::info!("Starting CI Queue Runner");

    let log_config = config.logs;
    let gc_config = config.gc;
    // The GC loop needs its own copy; `gc_config` itself is moved into the
    // worker pool below.
    let gc_config_for_loop = gc_config.clone();
    let notifications_config = config.notifications;
    let signing_config = config.signing;
    let cache_upload_config = config.cache_upload;
    let qr_config = config.queue_runner;

    // CLI flag takes precedence over the configured worker count.
    let workers = cli.workers.unwrap_or(qr_config.workers);
    let poll_interval = Duration::from_secs(qr_config.poll_interval);
    let build_timeout = Duration::from_secs(qr_config.build_timeout);
    let work_dir = qr_config.work_dir;

    // Ensure the work directory exists
    tokio::fs::create_dir_all(&work_dir).await?;

    // Clean up orphaned active logs from previous crashes
    cleanup_stale_logs(&log_config.log_dir).await;

    let db = Database::new(config.database).await?;

    let worker_pool = Arc::new(WorkerPool::new(
        db.pool().clone(),
        workers,
        work_dir.clone(),
        build_timeout,
        log_config,
        gc_config,
        notifications_config,
        signing_config,
        cache_upload_config,
    ));

    tracing::info!(
        workers = workers,
        poll_interval = ?poll_interval,
        build_timeout = ?build_timeout,
        work_dir = %work_dir.display(),
        "Queue runner configured"
    );

    let worker_pool_for_drain = worker_pool.clone();

    tokio::select! {
        result = fc_queue_runner::runner_loop::run(db.pool().clone(), worker_pool, poll_interval) => {
            if let Err(e) = result {
                tracing::error!("Runner loop failed: {e}");
            }
        }
        () = gc_loop(gc_config_for_loop) => {}
        () = shutdown_signal() => {
            tracing::info!("Shutdown signal received, draining in-flight builds...");
            worker_pool_for_drain.drain();
            worker_pool_for_drain.wait_for_drain().await;
            tracing::info!("All in-flight builds completed");
        }
    }

    tracing::info!("Queue runner shutting down, closing database pool");
    db.close().await;

    Ok(())
}
async fn cleanup_stale_logs(log_dir: &std::path::Path) {
if let Ok(mut entries) = tokio::fs::read_dir(log_dir).await {
while let Ok(Some(entry)) = entries.next_entry().await {
if entry.file_name().to_string_lossy().ends_with(".active.log") {
let _ = tokio::fs::remove_file(entry.path()).await;
tracing::info!("Removed stale active log: {}", entry.path().display());
}
}
if let Ok(mut entries) = tokio::fs::read_dir(log_dir).await {
while let Ok(Some(entry)) = entries.next_entry().await {
if entry.file_name().to_string_lossy().ends_with(".active.log") {
let _ = tokio::fs::remove_file(entry.path()).await;
tracing::info!("Removed stale active log: {}", entry.path().display());
}
}
}
}
async fn gc_loop(gc_config: GcConfig) {
if !gc_config.enabled {
return std::future::pending().await;
}
let interval = std::time::Duration::from_secs(gc_config.cleanup_interval);
let max_age = std::time::Duration::from_secs(gc_config.max_age_days * 86400);
if !gc_config.enabled {
return std::future::pending().await;
}
let interval = std::time::Duration::from_secs(gc_config.cleanup_interval);
let max_age = std::time::Duration::from_secs(gc_config.max_age_days * 86400);
loop {
tokio::time::sleep(interval).await;
match gc_roots::cleanup_old_roots(&gc_config.gc_roots_dir, max_age) {
Ok(count) if count > 0 => {
tracing::info!(count, "Cleaned up old GC roots");
// Optionally run nix-collect-garbage
match tokio::process::Command::new("nix-collect-garbage")
.output()
.await
{
Ok(output) if output.status.success() => {
tracing::info!("nix-collect-garbage completed");
}
Ok(output) => {
let stderr = String::from_utf8_lossy(&output.stderr);
tracing::warn!("nix-collect-garbage failed: {stderr}");
}
Err(e) => {
tracing::warn!("Failed to run nix-collect-garbage: {e}");
}
}
}
Ok(_) => {}
Err(e) => {
tracing::error!("GC cleanup failed: {e}");
}
loop {
tokio::time::sleep(interval).await;
match gc_roots::cleanup_old_roots(&gc_config.gc_roots_dir, max_age) {
Ok(count) if count > 0 => {
tracing::info!(count, "Cleaned up old GC roots");
// Optionally run nix-collect-garbage
match tokio::process::Command::new("nix-collect-garbage")
.output()
.await
{
Ok(output) if output.status.success() => {
tracing::info!("nix-collect-garbage completed");
},
Ok(output) => {
let stderr = String::from_utf8_lossy(&output.stderr);
tracing::warn!("nix-collect-garbage failed: {stderr}");
},
Err(e) => {
tracing::warn!("Failed to run nix-collect-garbage: {e}");
},
}
},
Ok(_) => {},
Err(e) => {
tracing::error!("GC cleanup failed: {e}");
},
}
}
}
/// Resolve when a shutdown signal arrives: Ctrl+C on all platforms, plus
/// SIGTERM on Unix. On non-Unix targets the SIGTERM branch never fires.
async fn shutdown_signal() {
    let ctrl_c = async {
        tokio::signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
            .expect("failed to install SIGTERM handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        () = ctrl_c => {},
        () = terminate => {},
    }
}

View file

@ -1,125 +1,129 @@
use std::sync::Arc;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
use fc_common::{models::BuildStatus, repo};
use sqlx::PgPool;
use fc_common::models::BuildStatus;
use fc_common::repo;
use crate::worker::WorkerPool;
pub async fn run(
pool: PgPool,
worker_pool: Arc<WorkerPool>,
poll_interval: Duration,
pool: PgPool,
worker_pool: Arc<WorkerPool>,
poll_interval: Duration,
) -> anyhow::Result<()> {
// Reset orphaned builds from previous crashes (older than 5 minutes)
match repo::builds::reset_orphaned(&pool, 300).await {
Ok(count) if count > 0 => {
tracing::warn!(count, "Reset orphaned builds back to pending");
// Reset orphaned builds from previous crashes (older than 5 minutes)
match repo::builds::reset_orphaned(&pool, 300).await {
Ok(count) if count > 0 => {
tracing::warn!(count, "Reset orphaned builds back to pending");
},
Ok(_) => {},
Err(e) => {
tracing::error!("Failed to reset orphaned builds: {e}");
},
}
loop {
match repo::builds::list_pending(&pool, 10).await {
Ok(builds) => {
if !builds.is_empty() {
tracing::info!("Found {} pending builds", builds.len());
}
Ok(_) => {}
Err(e) => {
tracing::error!("Failed to reset orphaned builds: {e}");
}
}
loop {
match repo::builds::list_pending(&pool, 10).await {
Ok(builds) => {
if !builds.is_empty() {
tracing::info!("Found {} pending builds", builds.len());
}
for build in builds {
// Aggregate builds: check if all constituents are done
if build.is_aggregate {
match repo::build_dependencies::all_deps_completed(&pool, build.id).await {
Ok(true) => {
// All constituents done — mark aggregate as completed
tracing::info!(
build_id = %build.id,
job = %build.job_name,
"Aggregate build: all constituents completed"
);
let _ = repo::builds::start(&pool, build.id).await;
let _ = repo::builds::complete(
&pool,
build.id,
BuildStatus::Completed,
None,
None,
None,
)
.await;
continue;
}
Ok(false) => {
tracing::debug!(
build_id = %build.id,
"Aggregate build waiting for constituents"
);
continue;
}
Err(e) => {
tracing::error!(
build_id = %build.id,
"Failed to check aggregate deps: {e}"
);
continue;
}
}
}
// Derivation deduplication: reuse result if same drv was already built
match repo::builds::get_completed_by_drv_path(&pool, &build.drv_path).await {
Ok(Some(existing)) if existing.id != build.id => {
tracing::info!(
build_id = %build.id,
existing_id = %existing.id,
drv = %build.drv_path,
"Dedup: reusing result from existing build"
);
let _ = repo::builds::start(&pool, build.id).await;
let _ = repo::builds::complete(
&pool,
build.id,
BuildStatus::Completed,
existing.log_path.as_deref(),
existing.build_output_path.as_deref(),
None,
)
.await;
continue;
}
_ => {}
}
// Dependency-aware scheduling: skip if deps not met
match repo::build_dependencies::all_deps_completed(&pool, build.id).await {
Ok(true) => {}
Ok(false) => {
tracing::debug!(
build_id = %build.id,
"Build waiting for dependencies"
);
continue;
}
Err(e) => {
tracing::error!(
build_id = %build.id,
"Failed to check build deps: {e}"
);
continue;
}
}
worker_pool.dispatch(build);
}
for build in builds {
// Aggregate builds: check if all constituents are done
if build.is_aggregate {
match repo::build_dependencies::all_deps_completed(&pool, build.id)
.await
{
Ok(true) => {
// All constituents done — mark aggregate as completed
tracing::info!(
build_id = %build.id,
job = %build.job_name,
"Aggregate build: all constituents completed"
);
let _ = repo::builds::start(&pool, build.id).await;
let _ = repo::builds::complete(
&pool,
build.id,
BuildStatus::Completed,
None,
None,
None,
)
.await;
continue;
},
Ok(false) => {
tracing::debug!(
build_id = %build.id,
"Aggregate build waiting for constituents"
);
continue;
},
Err(e) => {
tracing::error!(
build_id = %build.id,
"Failed to check aggregate deps: {e}"
);
continue;
},
}
}
// Derivation deduplication: reuse result if same drv was already
// built
match repo::builds::get_completed_by_drv_path(&pool, &build.drv_path)
.await
{
Ok(Some(existing)) if existing.id != build.id => {
tracing::info!(
build_id = %build.id,
existing_id = %existing.id,
drv = %build.drv_path,
"Dedup: reusing result from existing build"
);
let _ = repo::builds::start(&pool, build.id).await;
let _ = repo::builds::complete(
&pool,
build.id,
BuildStatus::Completed,
existing.log_path.as_deref(),
existing.build_output_path.as_deref(),
None,
)
.await;
continue;
},
_ => {},
}
// Dependency-aware scheduling: skip if deps not met
match repo::build_dependencies::all_deps_completed(&pool, build.id)
.await
{
Ok(true) => {},
Ok(false) => {
tracing::debug!(
build_id = %build.id,
"Build waiting for dependencies"
);
continue;
},
Err(e) => {
tracing::error!("Failed to fetch pending builds: {e}");
}
tracing::error!(
build_id = %build.id,
"Failed to check build deps: {e}"
);
continue;
},
}
worker_pool.dispatch(build);
}
tokio::time::sleep(poll_interval).await;
},
Err(e) => {
tracing::error!("Failed to fetch pending builds: {e}");
},
}
tokio::time::sleep(poll_interval).await;
}
}

File diff suppressed because it is too large Load diff