chore: format with updated rustfmt and taplo rules

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ie9ef5fc421fa20071946cf1073f7920c6a6a6964
commit c306383d27 by raf, 2026-02-02 02:23:50 +03:00
Signed by: NotAShelf (GPG key ID: 29D95B64378DB4BF)
72 changed files with 11217 additions and 10487 deletions

.rustfmt.toml (new file, 27 lines)

@@ -0,0 +1,27 @@
condense_wildcard_suffixes = true
doc_comment_code_block_width = 80
edition = "2024" # Keep in sync with Cargo.toml.
enum_discrim_align_threshold = 60
force_explicit_abi = false
force_multiline_blocks = true
format_code_in_doc_comments = true
format_macro_matchers = true
format_strings = true
group_imports = "StdExternalCrate"
hex_literal_case = "Upper"
imports_granularity = "Crate"
imports_layout = "HorizontalVertical"
inline_attribute_width = 60
match_block_trailing_comma = true
max_width = 80
newline_style = "Unix"
normalize_comments = true
normalize_doc_attributes = true
overflow_delimited_expr = true
struct_field_align_threshold = 60
tab_spaces = 2
unstable_features = true
use_field_init_shorthand = true
use_try_shorthand = true
wrap_comments = true
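
Two of these options interact: group_imports = "StdExternalCrate" splits use statements into std / external / crate-local groups, and imports_granularity = "Crate" merges imports from the same crate into a single tree. A hypothetical before/after illustration (not code from this repository):

    use crate::config::DatabaseConfig;
    use sqlx::PgPool;
    use std::time::Duration;
    use sqlx::postgres::PgPoolOptions;
    use std::path::PathBuf;

becomes:

    use std::{path::PathBuf, time::Duration};

    use sqlx::{PgPool, postgres::PgPoolOptions};

    use crate::config::DatabaseConfig;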

.taplo.toml (new file, 15 lines)

@@ -0,0 +1,15 @@
[formatting]
align_entries = true
column_width = 110
compact_arrays = false
reorder_inline_tables = false
reorder_keys = true
[[rule]]
include = [ "**/Cargo.toml" ]
keys = [ "package" ]
[rule.formatting]
reorder_keys = false
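
The [[rule]] override scopes reorder_keys: every table gets key-sorted except [package] in any Cargo.toml, which keeps its authored order (so name stays first), and align_entries = true column-aligns the equals signs, as seen throughout the reformatted manifests below. A small sketch of the intended output, on hypothetical input:

    [package]            # authored order preserved by the rule
    name    = "fc-common"
    edition = "2024"

    [dependencies]       # keys sorted, entries aligned
    anyhow = "1"
    serde  = "1"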

Cargo.toml

@@ -1,57 +1,62 @@
[workspace]
members = [
"crates/server",
"crates/evaluator",
"crates/queue-runner",
"crates/common",
"crates/migrate-cli",
"crates/server",
"crates/evaluator",
"crates/queue-runner",
"crates/common",
"crates/migrate-cli",
]
resolver = "3"
[workspace.package]
version = "0.1.0"
edition = "2024"
license = "MPL-2.0"
repository = "https://gitub.com/feel-co/fc"
authors = ["NotAShelf <raf@notashelf.dev"]
authors = [ "NotAShelf <raf@notashelf.dev" ]
edition = "2024"
license = "MPL-2.0"
repository = "https://gitub.com/feel-co/fc"
rust-version = "1.91.1"
version = "0.1.0"
[workspace.dependencies]
# Components
fc-common = {path = "./crates/common"}
fc-evaluator = {path = "./crates/evaluator"}
fc-queue-runner = {path = "./crates/queue-runner"}
fc-server = {path = "./crates/server"}
fc-common = { path = "./crates/common" }
fc-evaluator = { path = "./crates/evaluator" }
fc-queue-runner = { path = "./crates/queue-runner" }
fc-server = { path = "./crates/server" }
tokio = { version = "1.48.0", features = ["full"] }
axum = "0.8.8"
sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "postgres", "chrono", "uuid", "migrate"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.145"
uuid = { version = "1.18.1", features = ["v4", "serde"] }
chrono = { version = "0.4.42", features = ["serde"] }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "json"] }
anyhow = "1.0.100"
thiserror = "2.0.17"
git2 = "0.20.2"
clap = { version = "4.5.51", features = ["derive"] }
config = "0.15.18"
tempfile = "3.8"
toml = "0.9.8"
tower-http = { version = "0.6.8", features = ["cors", "trace", "limit", "fs", "set-header"] }
tower = "0.5.3"
futures = "0.3.31"
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
sha2 = "0.10"
hex = "0.4"
hmac = "0.12"
axum-extra = { version = "0.10", features = ["typed-header"] }
tokio-util = { version = "0.7", features = ["io"] }
nix-nar = "0.3"
lettre = { version = "0.11", default-features = false, features = ["tokio1-rustls-tls", "smtp-transport", "builder"] }
async-stream = "0.3"
dashmap = "6"
regex = "1"
askama = "0.12"
askama_axum = "0.4"
async-stream = "0.3"
axum = "0.8.8"
axum-extra = { version = "0.10", features = [ "typed-header" ] }
chrono = { version = "0.4.42", features = [ "serde" ] }
clap = { version = "4.5.51", features = [ "derive" ] }
config = "0.15.18"
dashmap = "6"
futures = "0.3.31"
git2 = "0.20.2"
hex = "0.4"
hmac = "0.12"
lettre = { version = "0.11", default-features = false, features = [
"tokio1-rustls-tls",
"smtp-transport",
"builder",
] }
nix-nar = "0.3"
regex = "1"
reqwest = { version = "0.12", default-features = false, features = [ "json", "rustls-tls" ] }
serde = { version = "1.0.228", features = [ "derive" ] }
serde_json = "1.0.145"
sha2 = "0.10"
sqlx = { version = "0.8.6", features = [ "runtime-tokio-rustls", "postgres", "chrono", "uuid", "migrate" ] }
tempfile = "3.8"
thiserror = "2.0.17"
tokio = { version = "1.48.0", features = [ "full" ] }
tokio-util = { version = "0.7", features = [ "io" ] }
toml = "0.9.8"
tower = "0.5.3"
tower-http = { version = "0.6.8", features = [ "cors", "trace", "limit", "fs", "set-header" ] }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = [ "env-filter", "json" ] }
uuid = { version = "1.18.1", features = [ "v4", "serde" ] }

crates/common/Cargo.toml

@@ -1,29 +1,29 @@
[package]
name = "fc-common"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
name = "fc-common"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
[dependencies]
sqlx.workspace = true
serde.workspace = true
serde_json.workspace = true
uuid.workspace = true
chrono.workspace = true
anyhow.workspace = true
thiserror.workspace = true
git2.workspace = true
tracing.workspace = true
anyhow.workspace = true
chrono.workspace = true
clap.workspace = true
config.workspace = true
git2.workspace = true
hex.workspace = true
lettre.workspace = true
regex.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
sqlx.workspace = true
tempfile.workspace = true
thiserror.workspace = true
tokio.workspace = true
toml.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
clap.workspace = true
config.workspace = true
tempfile.workspace = true
toml.workspace = true
tokio.workspace = true
reqwest.workspace = true
sha2.workspace = true
hex.workspace = true
lettre.workspace = true
regex.workspace = true
uuid.workspace = true


@@ -6,10 +6,12 @@
use sha2::{Digest, Sha256};
use sqlx::PgPool;
use crate::config::DeclarativeConfig;
use crate::error::Result;
use crate::models::{CreateJobset, CreateProject};
use crate::repo;
use crate::{
config::DeclarativeConfig,
error::Result,
models::{CreateJobset, CreateProject},
repo,
};
/// Bootstrap declarative configuration into the database.
///
@@ -17,79 +19,74 @@ use crate::repo;
/// produces the same database state. It upserts (insert or update) all
/// configured projects, jobsets, and API keys.
pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> {
if config.projects.is_empty() && config.api_keys.is_empty() {
return Ok(());
}
if config.projects.is_empty() && config.api_keys.is_empty() {
return Ok(());
}
let n_projects = config.projects.len();
let n_jobsets: usize = config.projects.iter().map(|p| p.jobsets.len()).sum();
let n_keys = config.api_keys.len();
let n_projects = config.projects.len();
let n_jobsets: usize = config.projects.iter().map(|p| p.jobsets.len()).sum();
let n_keys = config.api_keys.len();
tracing::info!(
projects = n_projects,
jobsets = n_jobsets,
api_keys = n_keys,
"Bootstrapping declarative configuration"
);
// Upsert projects and their jobsets
for decl_project in &config.projects {
let project = repo::projects::upsert(pool, CreateProject {
name: decl_project.name.clone(),
repository_url: decl_project.repository_url.clone(),
description: decl_project.description.clone(),
})
.await?;
tracing::info!(
projects = n_projects,
jobsets = n_jobsets,
api_keys = n_keys,
"Bootstrapping declarative configuration"
project = %project.name,
id = %project.id,
"Upserted declarative project"
);
// Upsert projects and their jobsets
for decl_project in &config.projects {
let project = repo::projects::upsert(
pool,
CreateProject {
name: decl_project.name.clone(),
repository_url: decl_project.repository_url.clone(),
description: decl_project.description.clone(),
},
)
for decl_jobset in &decl_project.jobsets {
let jobset = repo::jobsets::upsert(pool, CreateJobset {
project_id: project.id,
name: decl_jobset.name.clone(),
nix_expression: decl_jobset.nix_expression.clone(),
enabled: Some(decl_jobset.enabled),
flake_mode: Some(decl_jobset.flake_mode),
check_interval: Some(decl_jobset.check_interval),
branch: None,
scheduling_shares: None,
})
.await?;
tracing::info!(
project = %project.name,
jobset = %jobset.name,
"Upserted declarative jobset"
);
}
}
// Upsert API keys
for decl_key in &config.api_keys {
let mut hasher = Sha256::new();
hasher.update(decl_key.key.as_bytes());
let key_hash = hex::encode(hasher.finalize());
let api_key =
repo::api_keys::upsert(pool, &decl_key.name, &key_hash, &decl_key.role)
.await?;
tracing::info!(
project = %project.name,
id = %project.id,
"Upserted declarative project"
);
tracing::info!(
name = %api_key.name,
role = %api_key.role,
"Upserted declarative API key"
);
}
for decl_jobset in &decl_project.jobsets {
let jobset = repo::jobsets::upsert(
pool,
CreateJobset {
project_id: project.id,
name: decl_jobset.name.clone(),
nix_expression: decl_jobset.nix_expression.clone(),
enabled: Some(decl_jobset.enabled),
flake_mode: Some(decl_jobset.flake_mode),
check_interval: Some(decl_jobset.check_interval),
branch: None,
scheduling_shares: None,
},
)
.await?;
tracing::info!(
project = %project.name,
jobset = %jobset.name,
"Upserted declarative jobset"
);
}
}
// Upsert API keys
for decl_key in &config.api_keys {
let mut hasher = Sha256::new();
hasher.update(decl_key.key.as_bytes());
let key_hash = hex::encode(hasher.finalize());
let api_key =
repo::api_keys::upsert(pool, &decl_key.name, &key_hash, &decl_key.role).await?;
tracing::info!(
name = %api_key.name,
role = %api_key.role,
"Upserted declarative API key"
);
}
tracing::info!("Declarative bootstrap complete");
Ok(())
tracing::info!("Declarative bootstrap complete");
Ok(())
}
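
The idempotency described in the doc comment above rests on the repo::* upsert helpers, whose bodies this diff does not include. A minimal sketch of what repo::projects::upsert might issue, assuming a UNIQUE constraint on projects.name (both the SQL and the constraint are assumptions, not confirmed by this commit):

    use sqlx::PgPool;

    // Hypothetical: insert-or-update keyed on name, so re-running the
    // bootstrap converges on the same row instead of failing on a duplicate.
    pub async fn upsert_project(
      pool: &PgPool,
      name: &str,
      repository_url: &str,
    ) -> sqlx::Result<uuid::Uuid> {
      sqlx::query_scalar(
        "INSERT INTO projects (id, name, repository_url)
         VALUES (gen_random_uuid(), $1, $2)
         ON CONFLICT (name) DO UPDATE
           SET repository_url = EXCLUDED.repository_url, updated_at = now()
         RETURNING id",
      )
      .bind(name)
      .bind(repository_url)
      .fetch_one(pool)
      .await
    }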


@@ -7,452 +7,458 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct Config {
pub database: DatabaseConfig,
pub server: ServerConfig,
pub evaluator: EvaluatorConfig,
pub queue_runner: QueueRunnerConfig,
pub gc: GcConfig,
pub logs: LogConfig,
pub notifications: NotificationsConfig,
pub cache: CacheConfig,
pub signing: SigningConfig,
#[serde(default)]
pub cache_upload: CacheUploadConfig,
pub tracing: TracingConfig,
#[serde(default)]
pub declarative: DeclarativeConfig,
pub database: DatabaseConfig,
pub server: ServerConfig,
pub evaluator: EvaluatorConfig,
pub queue_runner: QueueRunnerConfig,
pub gc: GcConfig,
pub logs: LogConfig,
pub notifications: NotificationsConfig,
pub cache: CacheConfig,
pub signing: SigningConfig,
#[serde(default)]
pub cache_upload: CacheUploadConfig,
pub tracing: TracingConfig,
#[serde(default)]
pub declarative: DeclarativeConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
pub url: String,
pub max_connections: u32,
pub min_connections: u32,
pub connect_timeout: u64,
pub idle_timeout: u64,
pub max_lifetime: u64,
pub url: String,
pub max_connections: u32,
pub min_connections: u32,
pub connect_timeout: u64,
pub idle_timeout: u64,
pub max_lifetime: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ServerConfig {
pub host: String,
pub port: u16,
pub request_timeout: u64,
pub max_body_size: usize,
pub api_key: Option<String>,
pub allowed_origins: Vec<String>,
pub cors_permissive: bool,
pub rate_limit_rps: Option<u64>,
pub rate_limit_burst: Option<u32>,
pub host: String,
pub port: u16,
pub request_timeout: u64,
pub max_body_size: usize,
pub api_key: Option<String>,
pub allowed_origins: Vec<String>,
pub cors_permissive: bool,
pub rate_limit_rps: Option<u64>,
pub rate_limit_burst: Option<u32>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct EvaluatorConfig {
pub poll_interval: u64,
pub git_timeout: u64,
pub nix_timeout: u64,
pub max_concurrent_evals: usize,
pub work_dir: PathBuf,
pub restrict_eval: bool,
pub allow_ifd: bool,
pub poll_interval: u64,
pub git_timeout: u64,
pub nix_timeout: u64,
pub max_concurrent_evals: usize,
pub work_dir: PathBuf,
pub restrict_eval: bool,
pub allow_ifd: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueueRunnerConfig {
pub workers: usize,
pub poll_interval: u64,
pub build_timeout: u64,
pub work_dir: PathBuf,
pub workers: usize,
pub poll_interval: u64,
pub build_timeout: u64,
pub work_dir: PathBuf,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct GcConfig {
pub gc_roots_dir: PathBuf,
pub enabled: bool,
pub max_age_days: u64,
pub cleanup_interval: u64,
pub gc_roots_dir: PathBuf,
pub enabled: bool,
pub max_age_days: u64,
pub cleanup_interval: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogConfig {
pub log_dir: PathBuf,
pub compress: bool,
pub log_dir: PathBuf,
pub compress: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
#[derive(Default)]
pub struct NotificationsConfig {
pub run_command: Option<String>,
pub github_token: Option<String>,
pub gitea_url: Option<String>,
pub gitea_token: Option<String>,
pub email: Option<EmailConfig>,
pub run_command: Option<String>,
pub github_token: Option<String>,
pub gitea_url: Option<String>,
pub gitea_token: Option<String>,
pub email: Option<EmailConfig>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct EmailConfig {
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_user: Option<String>,
pub smtp_password: Option<String>,
pub from_address: String,
pub to_addresses: Vec<String>,
pub tls: bool,
pub on_failure_only: bool,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_user: Option<String>,
pub smtp_password: Option<String>,
pub from_address: String,
pub to_addresses: Vec<String>,
pub tls: bool,
pub on_failure_only: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheConfig {
pub enabled: bool,
pub secret_key_file: Option<PathBuf>,
pub enabled: bool,
pub secret_key_file: Option<PathBuf>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
#[derive(Default)]
pub struct SigningConfig {
pub enabled: bool,
pub key_file: Option<PathBuf>,
pub enabled: bool,
pub key_file: Option<PathBuf>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
#[derive(Default)]
pub struct CacheUploadConfig {
pub enabled: bool,
pub store_uri: Option<String>,
pub enabled: bool,
pub store_uri: Option<String>,
}
/// Declarative project/jobset/api-key definitions.
/// These are upserted on server startup, enabling fully declarative operation.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct DeclarativeConfig {
pub projects: Vec<DeclarativeProject>,
pub api_keys: Vec<DeclarativeApiKey>,
pub projects: Vec<DeclarativeProject>,
pub api_keys: Vec<DeclarativeApiKey>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeclarativeProject {
pub name: String,
pub repository_url: String,
pub description: Option<String>,
#[serde(default)]
pub jobsets: Vec<DeclarativeJobset>,
pub name: String,
pub repository_url: String,
pub description: Option<String>,
#[serde(default)]
pub jobsets: Vec<DeclarativeJobset>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeclarativeJobset {
pub name: String,
pub nix_expression: String,
#[serde(default = "default_true")]
pub enabled: bool,
#[serde(default = "default_true")]
pub flake_mode: bool,
#[serde(default = "default_check_interval")]
pub check_interval: i32,
pub name: String,
pub nix_expression: String,
#[serde(default = "default_true")]
pub enabled: bool,
#[serde(default = "default_true")]
pub flake_mode: bool,
#[serde(default = "default_check_interval")]
pub check_interval: i32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeclarativeApiKey {
pub name: String,
pub key: String,
#[serde(default = "default_role")]
pub role: String,
pub name: String,
pub key: String,
#[serde(default = "default_role")]
pub role: String,
}
fn default_true() -> bool {
true
true
}
fn default_check_interval() -> i32 {
60
60
}
fn default_role() -> String {
"admin".to_string()
"admin".to_string()
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct TracingConfig {
pub level: String,
pub format: String,
pub show_targets: bool,
pub show_timestamps: bool,
pub level: String,
pub format: String,
pub show_targets: bool,
pub show_timestamps: bool,
}
impl Default for TracingConfig {
fn default() -> Self {
Self {
level: "info".to_string(),
format: "compact".to_string(),
show_targets: true,
show_timestamps: true,
}
fn default() -> Self {
Self {
level: "info".to_string(),
format: "compact".to_string(),
show_targets: true,
show_timestamps: true,
}
}
}
impl Default for DatabaseConfig {
fn default() -> Self {
Self {
url: "postgresql://fc_ci:password@localhost/fc_ci".to_string(),
max_connections: 20,
min_connections: 5,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
}
fn default() -> Self {
Self {
url: "postgresql://fc_ci:password@localhost/fc_ci"
.to_string(),
max_connections: 20,
min_connections: 5,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
}
}
}
impl DatabaseConfig {
pub fn validate(&self) -> anyhow::Result<()> {
if self.url.is_empty() {
return Err(anyhow::anyhow!("Database URL cannot be empty"));
}
if !self.url.starts_with("postgresql://") && !self.url.starts_with("postgres://") {
return Err(anyhow::anyhow!(
"Database URL must start with postgresql:// or postgres://"
));
}
if self.max_connections == 0 {
return Err(anyhow::anyhow!(
"Max database connections must be greater than 0"
));
}
if self.min_connections > self.max_connections {
return Err(anyhow::anyhow!(
"Min database connections cannot exceed max connections"
));
}
Ok(())
pub fn validate(&self) -> anyhow::Result<()> {
if self.url.is_empty() {
return Err(anyhow::anyhow!("Database URL cannot be empty"));
}
if !self.url.starts_with("postgresql://")
&& !self.url.starts_with("postgres://")
{
return Err(anyhow::anyhow!(
"Database URL must start with postgresql:// or postgres://"
));
}
if self.max_connections == 0 {
return Err(anyhow::anyhow!(
"Max database connections must be greater than 0"
));
}
if self.min_connections > self.max_connections {
return Err(anyhow::anyhow!(
"Min database connections cannot exceed max connections"
));
}
Ok(())
}
}
impl Default for ServerConfig {
fn default() -> Self {
Self {
host: "127.0.0.1".to_string(),
port: 3000,
request_timeout: 30,
max_body_size: 10 * 1024 * 1024, // 10MB
api_key: None,
allowed_origins: Vec::new(),
cors_permissive: false,
rate_limit_rps: None,
rate_limit_burst: None,
}
fn default() -> Self {
Self {
host: "127.0.0.1".to_string(),
port: 3000,
request_timeout: 30,
max_body_size: 10 * 1024 * 1024, // 10MB
api_key: None,
allowed_origins: Vec::new(),
cors_permissive: false,
rate_limit_rps: None,
rate_limit_burst: None,
}
}
}
impl Default for EvaluatorConfig {
fn default() -> Self {
Self {
poll_interval: 60,
git_timeout: 600,
nix_timeout: 1800,
max_concurrent_evals: 4,
work_dir: PathBuf::from("/tmp/fc-evaluator"),
restrict_eval: true,
allow_ifd: false,
}
fn default() -> Self {
Self {
poll_interval: 60,
git_timeout: 600,
nix_timeout: 1800,
max_concurrent_evals: 4,
work_dir: PathBuf::from("/tmp/fc-evaluator"),
restrict_eval: true,
allow_ifd: false,
}
}
}
impl Default for QueueRunnerConfig {
fn default() -> Self {
Self {
workers: 4,
poll_interval: 5,
build_timeout: 3600,
work_dir: PathBuf::from("/tmp/fc-queue-runner"),
}
fn default() -> Self {
Self {
workers: 4,
poll_interval: 5,
build_timeout: 3600,
work_dir: PathBuf::from("/tmp/fc-queue-runner"),
}
}
}
impl Default for GcConfig {
fn default() -> Self {
Self {
gc_roots_dir: PathBuf::from("/nix/var/nix/gcroots/per-user/fc/fc-roots"),
enabled: true,
max_age_days: 30,
cleanup_interval: 3600,
}
fn default() -> Self {
Self {
gc_roots_dir: PathBuf::from(
"/nix/var/nix/gcroots/per-user/fc/fc-roots",
),
enabled: true,
max_age_days: 30,
cleanup_interval: 3600,
}
}
}
impl Default for LogConfig {
fn default() -> Self {
Self {
log_dir: PathBuf::from("/var/lib/fc/logs"),
compress: false,
}
fn default() -> Self {
Self {
log_dir: PathBuf::from("/var/lib/fc/logs"),
compress: false,
}
}
}
impl Default for CacheConfig {
fn default() -> Self {
Self {
enabled: true,
secret_key_file: None,
}
fn default() -> Self {
Self {
enabled: true,
secret_key_file: None,
}
}
}
impl Config {
pub fn load() -> anyhow::Result<Self> {
let mut settings = config_crate::Config::builder();
pub fn load() -> anyhow::Result<Self> {
let mut settings = config_crate::Config::builder();
// Load default configuration
settings = settings.add_source(config_crate::Config::try_from(&Self::default())?);
// Load default configuration
settings =
settings.add_source(config_crate::Config::try_from(&Self::default())?);
// Load from config file if it exists
if let Ok(config_path) = std::env::var("FC_CONFIG_FILE") {
if std::path::Path::new(&config_path).exists() {
settings = settings.add_source(config_crate::File::with_name(&config_path));
}
} else if std::path::Path::new("fc.toml").exists() {
settings = settings.add_source(config_crate::File::with_name("fc").required(false));
}
// Load from environment variables with FC_ prefix (highest priority)
settings = settings.add_source(
config_crate::Environment::with_prefix("FC")
.separator("__")
.try_parsing(true),
);
let config = settings.build()?.try_deserialize::<Self>()?;
// Validate configuration
config.validate()?;
Ok(config)
// Load from config file if it exists
if let Ok(config_path) = std::env::var("FC_CONFIG_FILE") {
if std::path::Path::new(&config_path).exists() {
settings =
settings.add_source(config_crate::File::with_name(&config_path));
}
} else if std::path::Path::new("fc.toml").exists() {
settings = settings
.add_source(config_crate::File::with_name("fc").required(false));
}
pub fn validate(&self) -> anyhow::Result<()> {
// Validate database URL
if self.database.url.is_empty() {
return Err(anyhow::anyhow!("Database URL cannot be empty"));
}
// Load from environment variables with FC_ prefix (highest priority)
settings = settings.add_source(
config_crate::Environment::with_prefix("FC")
.separator("__")
.try_parsing(true),
);
if !self.database.url.starts_with("postgresql://")
&& !self.database.url.starts_with("postgres://")
{
return Err(anyhow::anyhow!(
"Database URL must start with postgresql:// or postgres://"
));
}
let config = settings.build()?.try_deserialize::<Self>()?;
// Validate connection pool settings
if self.database.max_connections == 0 {
return Err(anyhow::anyhow!(
"Max database connections must be greater than 0"
));
}
// Validate configuration
config.validate()?;
if self.database.min_connections > self.database.max_connections {
return Err(anyhow::anyhow!(
"Min database connections cannot exceed max connections"
));
}
Ok(config)
}
// Validate server settings
if self.server.port == 0 {
return Err(anyhow::anyhow!("Server port must be greater than 0"));
}
// Validate evaluator settings
if self.evaluator.poll_interval == 0 {
return Err(anyhow::anyhow!(
"Evaluator poll interval must be greater than 0"
));
}
// Validate queue runner settings
if self.queue_runner.workers == 0 {
return Err(anyhow::anyhow!(
"Queue runner workers must be greater than 0"
));
}
// Validate GC config
if self.gc.enabled && self.gc.gc_roots_dir.as_os_str().is_empty() {
return Err(anyhow::anyhow!(
"GC roots directory cannot be empty when GC is enabled"
));
}
// Validate log config
if self.logs.log_dir.as_os_str().is_empty() {
return Err(anyhow::anyhow!("Log directory cannot be empty"));
}
Ok(())
pub fn validate(&self) -> anyhow::Result<()> {
// Validate database URL
if self.database.url.is_empty() {
return Err(anyhow::anyhow!("Database URL cannot be empty"));
}
if !self.database.url.starts_with("postgresql://")
&& !self.database.url.starts_with("postgres://")
{
return Err(anyhow::anyhow!(
"Database URL must start with postgresql:// or postgres://"
));
}
// Validate connection pool settings
if self.database.max_connections == 0 {
return Err(anyhow::anyhow!(
"Max database connections must be greater than 0"
));
}
if self.database.min_connections > self.database.max_connections {
return Err(anyhow::anyhow!(
"Min database connections cannot exceed max connections"
));
}
// Validate server settings
if self.server.port == 0 {
return Err(anyhow::anyhow!("Server port must be greater than 0"));
}
// Validate evaluator settings
if self.evaluator.poll_interval == 0 {
return Err(anyhow::anyhow!(
"Evaluator poll interval must be greater than 0"
));
}
// Validate queue runner settings
if self.queue_runner.workers == 0 {
return Err(anyhow::anyhow!(
"Queue runner workers must be greater than 0"
));
}
// Validate GC config
if self.gc.enabled && self.gc.gc_roots_dir.as_os_str().is_empty() {
return Err(anyhow::anyhow!(
"GC roots directory cannot be empty when GC is enabled"
));
}
// Validate log config
if self.logs.log_dir.as_os_str().is_empty() {
return Err(anyhow::anyhow!("Log directory cannot be empty"));
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::env;
#[test]
fn test_default_config() {
let config = Config::default();
assert!(config.validate().is_ok());
}
use super::*;
#[test]
fn test_invalid_database_url() {
let mut config = Config::default();
config.database.url = "invalid://url".to_string();
assert!(config.validate().is_err());
}
#[test]
fn test_default_config() {
let config = Config::default();
assert!(config.validate().is_ok());
}
#[test]
fn test_invalid_port() {
let mut config = Config::default();
config.server.port = 0;
assert!(config.validate().is_err());
#[test]
fn test_invalid_database_url() {
let mut config = Config::default();
config.database.url = "invalid://url".to_string();
assert!(config.validate().is_err());
}
config.server.port = 65535;
assert!(config.validate().is_ok()); // valid port
}
#[test]
fn test_invalid_port() {
let mut config = Config::default();
config.server.port = 0;
assert!(config.validate().is_err());
#[test]
fn test_invalid_connections() {
let mut config = Config::default();
config.database.max_connections = 0;
assert!(config.validate().is_err());
config.server.port = 65535;
assert!(config.validate().is_ok()); // valid port
}
config.database.max_connections = 10;
config.database.min_connections = 15;
assert!(config.validate().is_err());
}
#[test]
fn test_invalid_connections() {
let mut config = Config::default();
config.database.max_connections = 0;
assert!(config.validate().is_err());
#[test]
fn test_declarative_config_default_is_empty() {
let config = DeclarativeConfig::default();
assert!(config.projects.is_empty());
assert!(config.api_keys.is_empty());
}
config.database.max_connections = 10;
config.database.min_connections = 15;
assert!(config.validate().is_err());
}
#[test]
fn test_declarative_config_deserialization() {
let toml_str = r#"
#[test]
fn test_declarative_config_default_is_empty() {
let config = DeclarativeConfig::default();
assert!(config.projects.is_empty());
assert!(config.api_keys.is_empty());
}
#[test]
fn test_declarative_config_deserialization() {
let toml_str = r#"
[[projects]]
name = "my-project"
repository_url = "https://github.com/test/repo"
@@ -467,77 +473,77 @@ mod tests {
key = "fc_secret_key_123"
role = "admin"
"#;
let config: DeclarativeConfig = toml::from_str(toml_str).unwrap();
assert_eq!(config.projects.len(), 1);
assert_eq!(config.projects[0].name, "my-project");
assert_eq!(config.projects[0].jobsets.len(), 1);
assert_eq!(config.projects[0].jobsets[0].name, "packages");
assert!(config.projects[0].jobsets[0].enabled); // default true
assert!(config.projects[0].jobsets[0].flake_mode); // default true
assert_eq!(config.api_keys.len(), 1);
assert_eq!(config.api_keys[0].role, "admin");
let config: DeclarativeConfig = toml::from_str(toml_str).unwrap();
assert_eq!(config.projects.len(), 1);
assert_eq!(config.projects[0].name, "my-project");
assert_eq!(config.projects[0].jobsets.len(), 1);
assert_eq!(config.projects[0].jobsets[0].name, "packages");
assert!(config.projects[0].jobsets[0].enabled); // default true
assert!(config.projects[0].jobsets[0].flake_mode); // default true
assert_eq!(config.api_keys.len(), 1);
assert_eq!(config.api_keys[0].role, "admin");
}
#[test]
fn test_declarative_config_serialization_roundtrip() {
let config = DeclarativeConfig {
projects: vec![DeclarativeProject {
name: "test".to_string(),
repository_url: "https://example.com/repo".to_string(),
description: Some("desc".to_string()),
jobsets: vec![DeclarativeJobset {
name: "checks".to_string(),
nix_expression: "checks".to_string(),
enabled: true,
flake_mode: true,
check_interval: 300,
}],
}],
api_keys: vec![DeclarativeApiKey {
name: "test-key".to_string(),
key: "fc_test".to_string(),
role: "admin".to_string(),
}],
};
let json = serde_json::to_string(&config).unwrap();
let parsed: DeclarativeConfig = serde_json::from_str(&json).unwrap();
assert_eq!(parsed.projects.len(), 1);
assert_eq!(parsed.projects[0].jobsets[0].check_interval, 300);
assert_eq!(parsed.api_keys[0].name, "test-key");
}
#[test]
fn test_declarative_config_with_main_config() {
// Ensure declarative section is optional (default empty)
// Use the config crate loader which provides defaults for missing fields
let config = Config::default();
assert!(config.declarative.projects.is_empty());
assert!(config.declarative.api_keys.is_empty());
// And that the Config can be serialized back with declarative section
let toml_str = toml::to_string_pretty(&config).unwrap();
let parsed: Config = toml::from_str(&toml_str).unwrap();
assert!(parsed.declarative.projects.is_empty());
}
#[test]
fn test_environment_override() {
// Test environment variable parsing directly
unsafe {
env::set_var("FC_DATABASE__URL", "postgresql://test:test@localhost/test");
env::set_var("FC_SERVER__PORT", "8080");
}
#[test]
fn test_declarative_config_serialization_roundtrip() {
let config = DeclarativeConfig {
projects: vec![DeclarativeProject {
name: "test".to_string(),
repository_url: "https://example.com/repo".to_string(),
description: Some("desc".to_string()),
jobsets: vec![DeclarativeJobset {
name: "checks".to_string(),
nix_expression: "checks".to_string(),
enabled: true,
flake_mode: true,
check_interval: 300,
}],
}],
api_keys: vec![DeclarativeApiKey {
name: "test-key".to_string(),
key: "fc_test".to_string(),
role: "admin".to_string(),
}],
};
// Test that environment variables are being read correctly
let db_url = std::env::var("FC_DATABASE__URL").unwrap();
let server_port = std::env::var("FC_SERVER__PORT").unwrap();
let json = serde_json::to_string(&config).unwrap();
let parsed: DeclarativeConfig = serde_json::from_str(&json).unwrap();
assert_eq!(parsed.projects.len(), 1);
assert_eq!(parsed.projects[0].jobsets[0].check_interval, 300);
assert_eq!(parsed.api_keys[0].name, "test-key");
}
#[test]
fn test_declarative_config_with_main_config() {
// Ensure declarative section is optional (default empty)
// Use the config crate loader which provides defaults for missing fields
let config = Config::default();
assert!(config.declarative.projects.is_empty());
assert!(config.declarative.api_keys.is_empty());
// And that the Config can be serialized back with declarative section
let toml_str = toml::to_string_pretty(&config).unwrap();
let parsed: Config = toml::from_str(&toml_str).unwrap();
assert!(parsed.declarative.projects.is_empty());
}
#[test]
fn test_environment_override() {
// Test environment variable parsing directly
unsafe {
env::set_var("FC_DATABASE__URL", "postgresql://test:test@localhost/test");
env::set_var("FC_SERVER__PORT", "8080");
}
// Test that environment variables are being read correctly
let db_url = std::env::var("FC_DATABASE__URL").unwrap();
let server_port = std::env::var("FC_SERVER__PORT").unwrap();
assert_eq!(db_url, "postgresql://test:test@localhost/test");
assert_eq!(server_port, "8080");
unsafe {
env::remove_var("FC_DATABASE__URL");
env::remove_var("FC_SERVER__PORT");
}
assert_eq!(db_url, "postgresql://test:test@localhost/test");
assert_eq!(server_port, "8080");
unsafe {
env::remove_var("FC_DATABASE__URL");
env::remove_var("FC_SERVER__PORT");
}
}
}
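
Putting the loader together: Self::default() seeds every field, an optional fc.toml (or the file named by FC_CONFIG_FILE) overrides it, and FC_-prefixed environment variables win last. A minimal fc.toml exercising the declarative section, with field names taken from the structs above and all values hypothetical:

    [server]
    port = 8080

    [[declarative.projects]]
    name           = "my-project"
    repository_url = "https://example.com/repo.git"

    [[declarative.projects.jobsets]]
    name           = "packages"
    nix_expression = "packages"
    # enabled and flake_mode default to true, check_interval to 60

    [[declarative.api_keys]]
    name = "ci-admin"
    key  = "fc_example_key"
    # role defaults to "admin"

Setting FC_SERVER__PORT=3001 in the environment would still override the port from this file, because the environment source is added last in Config::load.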


@@ -1,63 +1,65 @@
//! Database connection and pool management
use crate::config::DatabaseConfig;
use sqlx::{PgPool, Row, postgres::PgPoolOptions};
use std::time::Duration;
use sqlx::{PgPool, Row, postgres::PgPoolOptions};
use tracing::{debug, info, warn};
use crate::config::DatabaseConfig;
pub struct Database {
pool: PgPool,
pool: PgPool,
}
impl Database {
pub async fn new(config: DatabaseConfig) -> anyhow::Result<Self> {
info!("Initializing database connection pool");
pub async fn new(config: DatabaseConfig) -> anyhow::Result<Self> {
info!("Initializing database connection pool");
let pool = PgPoolOptions::new()
.max_connections(config.max_connections)
.min_connections(config.min_connections)
.acquire_timeout(Duration::from_secs(config.connect_timeout))
.idle_timeout(Duration::from_secs(config.idle_timeout))
.max_lifetime(Duration::from_secs(config.max_lifetime))
.connect(&config.url)
.await?;
let pool = PgPoolOptions::new()
.max_connections(config.max_connections)
.min_connections(config.min_connections)
.acquire_timeout(Duration::from_secs(config.connect_timeout))
.idle_timeout(Duration::from_secs(config.idle_timeout))
.max_lifetime(Duration::from_secs(config.max_lifetime))
.connect(&config.url)
.await?;
// Test the connection
Self::health_check(&pool).await?;
// Test the connection
Self::health_check(&pool).await?;
info!("Database connection pool initialized successfully");
info!("Database connection pool initialized successfully");
Ok(Self { pool })
Ok(Self { pool })
}
#[must_use]
pub const fn pool(&self) -> &PgPool {
&self.pool
}
pub async fn health_check(pool: &PgPool) -> anyhow::Result<()> {
debug!("Performing database health check");
let result: i32 = sqlx::query_scalar("SELECT 1").fetch_one(pool).await?;
if result != 1 {
return Err(anyhow::anyhow!(
"Database health check failed: unexpected result"
));
}
#[must_use]
pub const fn pool(&self) -> &PgPool {
&self.pool
}
debug!("Database health check passed");
Ok(())
}
pub async fn health_check(pool: &PgPool) -> anyhow::Result<()> {
debug!("Performing database health check");
pub async fn close(&self) {
info!("Closing database connection pool");
self.pool.close().await;
}
let result: i32 = sqlx::query_scalar("SELECT 1").fetch_one(pool).await?;
if result != 1 {
return Err(anyhow::anyhow!(
"Database health check failed: unexpected result"
));
}
debug!("Database health check passed");
Ok(())
}
pub async fn close(&self) {
info!("Closing database connection pool");
self.pool.close().await;
}
pub async fn get_connection_info(&self) -> anyhow::Result<ConnectionInfo> {
let row = sqlx::query(
r"
pub async fn get_connection_info(&self) -> anyhow::Result<ConnectionInfo> {
let row = sqlx::query(
r"
SELECT
current_database() as database,
current_user as user,
@@ -65,81 +67,81 @@ impl Database {
inet_server_addr() as server_ip,
inet_server_port() as server_port
",
)
.fetch_one(&self.pool)
.await?;
)
.fetch_one(&self.pool)
.await?;
Ok(ConnectionInfo {
database: row.get("database"),
user: row.get("user"),
version: row.get("version"),
server_ip: row.get("server_ip"),
server_port: row.get("server_port"),
})
}
pub async fn get_pool_stats(&self) -> PoolStats {
let pool = &self.pool;
PoolStats {
size: pool.size(),
idle: pool.num_idle() as u32,
active: (pool.size() - pool.num_idle() as u32),
}
Ok(ConnectionInfo {
database: row.get("database"),
user: row.get("user"),
version: row.get("version"),
server_ip: row.get("server_ip"),
server_port: row.get("server_port"),
})
}
pub async fn get_pool_stats(&self) -> PoolStats {
let pool = &self.pool;
PoolStats {
size: pool.size(),
idle: pool.num_idle() as u32,
active: (pool.size() - pool.num_idle() as u32),
}
}
}
#[derive(Debug, Clone)]
pub struct ConnectionInfo {
pub database: String,
pub user: String,
pub version: String,
pub server_ip: Option<String>,
pub server_port: Option<i32>,
pub database: String,
pub user: String,
pub version: String,
pub server_ip: Option<String>,
pub server_port: Option<i32>,
}
#[derive(Debug, Clone)]
pub struct PoolStats {
pub size: u32,
pub idle: u32,
pub active: u32,
pub size: u32,
pub idle: u32,
pub active: u32,
}
impl Drop for Database {
fn drop(&mut self) {
warn!("Database connection pool dropped without explicit close");
}
fn drop(&mut self) {
warn!("Database connection pool dropped without explicit close");
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn test_pool_stats() {
let stats = PoolStats {
size: 10,
idle: 3,
active: 7,
};
#[test]
fn test_pool_stats() {
let stats = PoolStats {
size: 10,
idle: 3,
active: 7,
};
assert_eq!(stats.size, 10);
assert_eq!(stats.idle, 3);
assert_eq!(stats.active, 7);
}
assert_eq!(stats.size, 10);
assert_eq!(stats.idle, 3);
assert_eq!(stats.active, 7);
}
#[test]
fn test_connection_info() {
let info = ConnectionInfo {
database: "test_db".to_string(),
user: "test_user".to_string(),
version: "PostgreSQL 14.0".to_string(),
server_ip: Some("127.0.0.1".to_string()),
server_port: Some(5432),
};
#[test]
fn test_connection_info() {
let info = ConnectionInfo {
database: "test_db".to_string(),
user: "test_user".to_string(),
version: "PostgreSQL 14.0".to_string(),
server_ip: Some("127.0.0.1".to_string()),
server_port: Some(5432),
};
assert_eq!(info.database, "test_db");
assert_eq!(info.user, "test_user");
assert_eq!(info.server_port, Some(5432));
}
assert_eq!(info.database, "test_db");
assert_eq!(info.user, "test_user");
assert_eq!(info.server_port, Some(5432));
}
}
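
A startup sketch, assuming a reachable Postgres at the default URL and that the types above are exported as fc_common::db (the module path is an assumption):

    use fc_common::{config::DatabaseConfig, db::Database};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
      let db = Database::new(DatabaseConfig::default()).await?;

      let stats = db.get_pool_stats().await;
      println!("pool: {} total, {} idle, {} active", stats.size, stats.idle, stats.active);

      // Close explicitly: the Drop impl can only log a warning, it cannot
      // await close() for a graceful shutdown.
      db.close().await;
      Ok(())
    }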


@@ -4,44 +4,44 @@ use thiserror::Error;
#[derive(Error, Debug)]
pub enum CiError {
#[error("Database error: {0}")]
Database(#[from] sqlx::Error),
#[error("Database error: {0}")]
Database(#[from] sqlx::Error),
#[error("Git error: {0}")]
Git(#[from] git2::Error),
#[error("Git error: {0}")]
Git(#[from] git2::Error),
#[error("Serialization error: {0}")]
Serialization(#[from] serde_json::Error),
#[error("Serialization error: {0}")]
Serialization(#[from] serde_json::Error),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Configuration error: {0}")]
Config(String),
#[error("Configuration error: {0}")]
Config(String),
#[error("Build error: {0}")]
Build(String),
#[error("Build error: {0}")]
Build(String),
#[error("Not found: {0}")]
NotFound(String),
#[error("Not found: {0}")]
NotFound(String),
#[error("Validation error: {0}")]
Validation(String),
#[error("Validation error: {0}")]
Validation(String),
#[error("Conflict: {0}")]
Conflict(String),
#[error("Conflict: {0}")]
Conflict(String),
#[error("Timeout: {0}")]
Timeout(String),
#[error("Timeout: {0}")]
Timeout(String),
#[error("Nix evaluation error: {0}")]
NixEval(String),
#[error("Nix evaluation error: {0}")]
NixEval(String),
#[error("Unauthorized: {0}")]
Unauthorized(String),
#[error("Unauthorized: {0}")]
Unauthorized(String),
#[error("Forbidden: {0}")]
Forbidden(String),
#[error("Forbidden: {0}")]
Forbidden(String),
}
pub type Result<T> = std::result::Result<T, CiError>;
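
Because the variants derive #[from], the ? operator converts library errors into CiError with no map_err noise. A small sketch (the function, query, and module path are hypothetical):

    use fc_common::error::{CiError, Result};
    use sqlx::PgPool;

    async fn project_exists(pool: &PgPool, name: &str) -> Result<bool> {
      // sqlx::Error -> CiError::Database, courtesy of #[from] and `?`.
      let n: i64 =
        sqlx::query_scalar("SELECT COUNT(*) FROM projects WHERE name = $1")
          .bind(name)
          .fetch_one(pool)
          .await?;

      if n > 1 {
        // String-carrying variants are constructed directly.
        return Err(CiError::Validation(format!("duplicate project: {name}")));
      }
      Ok(n == 1)
    }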


@@ -1,103 +1,113 @@
//! GC root management - prevents nix-store --gc from deleting build outputs
use std::os::unix::fs::symlink;
use std::path::{Path, PathBuf};
use std::time::Duration;
use std::{
os::unix::fs::symlink,
path::{Path, PathBuf},
time::Duration,
};
use tracing::{info, warn};
/// Remove GC root symlinks with mtime older than max_age. Returns count removed.
pub fn cleanup_old_roots(roots_dir: &Path, max_age: Duration) -> std::io::Result<u64> {
if !roots_dir.exists() {
return Ok(0);
/// Remove GC root symlinks with mtime older than max_age. Returns count
/// removed.
pub fn cleanup_old_roots(
roots_dir: &Path,
max_age: Duration,
) -> std::io::Result<u64> {
if !roots_dir.exists() {
return Ok(0);
}
let mut count = 0u64;
let now = std::time::SystemTime::now();
for entry in std::fs::read_dir(roots_dir)? {
let entry = entry?;
let metadata = match entry.metadata() {
Ok(m) => m,
Err(_) => continue,
};
let modified = match metadata.modified() {
Ok(t) => t,
Err(_) => continue,
};
if let Ok(age) = now.duration_since(modified)
&& age > max_age
{
if let Err(e) = std::fs::remove_file(entry.path()) {
warn!(
"Failed to remove old GC root {}: {e}",
entry.path().display()
);
} else {
count += 1;
}
}
}
let mut count = 0u64;
let now = std::time::SystemTime::now();
for entry in std::fs::read_dir(roots_dir)? {
let entry = entry?;
let metadata = match entry.metadata() {
Ok(m) => m,
Err(_) => continue,
};
let modified = match metadata.modified() {
Ok(t) => t,
Err(_) => continue,
};
if let Ok(age) = now.duration_since(modified)
&& age > max_age {
if let Err(e) = std::fs::remove_file(entry.path()) {
warn!(
"Failed to remove old GC root {}: {e}",
entry.path().display()
);
} else {
count += 1;
}
}
}
Ok(count)
Ok(count)
}
pub struct GcRoots {
roots_dir: PathBuf,
enabled: bool,
roots_dir: PathBuf,
enabled: bool,
}
impl GcRoots {
pub fn new(roots_dir: PathBuf, enabled: bool) -> std::io::Result<Self> {
if enabled {
std::fs::create_dir_all(&roots_dir)?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
std::fs::set_permissions(&roots_dir, std::fs::Permissions::from_mode(0o700))?;
}
}
Ok(Self { roots_dir, enabled })
pub fn new(roots_dir: PathBuf, enabled: bool) -> std::io::Result<Self> {
if enabled {
std::fs::create_dir_all(&roots_dir)?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
std::fs::set_permissions(
&roots_dir,
std::fs::Permissions::from_mode(0o700),
)?;
}
}
Ok(Self { roots_dir, enabled })
}
/// Register a GC root for a build output. Returns the symlink path.
pub fn register(
&self,
build_id: &uuid::Uuid,
output_path: &str,
) -> std::io::Result<Option<PathBuf>> {
if !self.enabled {
return Ok(None);
}
if !crate::validate::is_valid_store_path(output_path) {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("Invalid store path: {output_path}"),
));
}
let link_path = self.roots_dir.join(build_id.to_string());
// Remove existing symlink if present
if link_path.exists() || link_path.symlink_metadata().is_ok() {
std::fs::remove_file(&link_path)?;
}
symlink(output_path, &link_path)?;
info!(build_id = %build_id, output = output_path, "Registered GC root");
Ok(Some(link_path))
/// Register a GC root for a build output. Returns the symlink path.
pub fn register(
&self,
build_id: &uuid::Uuid,
output_path: &str,
) -> std::io::Result<Option<PathBuf>> {
if !self.enabled {
return Ok(None);
}
if !crate::validate::is_valid_store_path(output_path) {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("Invalid store path: {output_path}"),
));
}
let link_path = self.roots_dir.join(build_id.to_string());
// Remove existing symlink if present
if link_path.exists() || link_path.symlink_metadata().is_ok() {
std::fs::remove_file(&link_path)?;
}
symlink(output_path, &link_path)?;
info!(build_id = %build_id, output = output_path, "Registered GC root");
Ok(Some(link_path))
}
/// Remove a GC root for a build.
pub fn remove(&self, build_id: &uuid::Uuid) {
if !self.enabled {
return;
}
let link_path = self.roots_dir.join(build_id.to_string());
if let Err(e) = std::fs::remove_file(&link_path) {
if e.kind() != std::io::ErrorKind::NotFound {
warn!(build_id = %build_id, "Failed to remove GC root: {e}");
}
} else {
info!(build_id = %build_id, "Removed GC root");
}
/// Remove a GC root for a build.
pub fn remove(&self, build_id: &uuid::Uuid) {
if !self.enabled {
return;
}
let link_path = self.roots_dir.join(build_id.to_string());
if let Err(e) = std::fs::remove_file(&link_path) {
if e.kind() != std::io::ErrorKind::NotFound {
warn!(build_id = %build_id, "Failed to remove GC root: {e}");
}
} else {
info!(build_id = %build_id, "Removed GC root");
}
}
}
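
Lifecycle sketch: register an output when a build completes, drop the root once the output no longer needs pinning, and periodically sweep stale links. The paths, store hash, and module path are all hypothetical:

    use std::{path::PathBuf, time::Duration};

    use fc_common::gc::{GcRoots, cleanup_old_roots};

    fn main() -> std::io::Result<()> {
      let roots = GcRoots::new(PathBuf::from("/tmp/fc-roots"), true)?;

      let build_id = uuid::Uuid::new_v4();
      // register() rejects anything that fails the store-path validation.
      let _link = roots.register(
        &build_id,
        "/nix/store/0cvmwd9sqh0nfpp6yv7zsjz1gbyprqnz-hello-2.12",
      )?;

      // Later, when the output may be collected again:
      roots.remove(&build_id);

      // Sweep symlinks whose mtime is older than 30 days.
      let swept = cleanup_old_roots(
        &PathBuf::from("/tmp/fc-roots"),
        Duration::from_secs(30 * 24 * 3600),
      )?;
      println!("removed {swept} stale GC roots");
      Ok(())
    }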


@@ -5,64 +5,64 @@ use std::path::PathBuf;
use uuid::Uuid;
pub struct LogStorage {
log_dir: PathBuf,
log_dir: PathBuf,
}
impl LogStorage {
pub fn new(log_dir: PathBuf) -> std::io::Result<Self> {
std::fs::create_dir_all(&log_dir)?;
Ok(Self { log_dir })
}
pub fn new(log_dir: PathBuf) -> std::io::Result<Self> {
std::fs::create_dir_all(&log_dir)?;
Ok(Self { log_dir })
}
/// Returns the filesystem path where a build's log should be stored
pub fn log_path(&self, build_id: &Uuid) -> PathBuf {
self.log_dir.join(format!("{}.log", build_id))
}
/// Returns the filesystem path where a build's log should be stored
pub fn log_path(&self, build_id: &Uuid) -> PathBuf {
self.log_dir.join(format!("{}.log", build_id))
}
/// Returns the filesystem path for an active (in-progress) build log
pub fn log_path_for_active(&self, build_id: &Uuid) -> PathBuf {
self.log_dir.join(format!("{}.active.log", build_id))
}
/// Returns the filesystem path for an active (in-progress) build log
pub fn log_path_for_active(&self, build_id: &Uuid) -> PathBuf {
self.log_dir.join(format!("{}.active.log", build_id))
}
/// Write build log content to file
pub fn write_log(
&self,
build_id: &Uuid,
stdout: &str,
stderr: &str,
) -> std::io::Result<PathBuf> {
let path = self.log_path(build_id);
let mut content = String::new();
if !stdout.is_empty() {
content.push_str(stdout);
}
if !stderr.is_empty() {
if !content.is_empty() {
content.push('\n');
}
content.push_str(stderr);
}
std::fs::write(&path, &content)?;
tracing::debug!(build_id = %build_id, path = %path.display(), "Wrote build log");
Ok(path)
/// Write build log content to file
pub fn write_log(
&self,
build_id: &Uuid,
stdout: &str,
stderr: &str,
) -> std::io::Result<PathBuf> {
let path = self.log_path(build_id);
let mut content = String::new();
if !stdout.is_empty() {
content.push_str(stdout);
}
if !stderr.is_empty() {
if !content.is_empty() {
content.push('\n');
}
content.push_str(stderr);
}
std::fs::write(&path, &content)?;
tracing::debug!(build_id = %build_id, path = %path.display(), "Wrote build log");
Ok(path)
}
/// Read a build log from disk. Returns None if the file doesn't exist.
pub fn read_log(&self, build_id: &Uuid) -> std::io::Result<Option<String>> {
let path = self.log_path(build_id);
if !path.exists() {
return Ok(None);
}
let content = std::fs::read_to_string(&path)?;
Ok(Some(content))
/// Read a build log from disk. Returns None if the file doesn't exist.
pub fn read_log(&self, build_id: &Uuid) -> std::io::Result<Option<String>> {
let path = self.log_path(build_id);
if !path.exists() {
return Ok(None);
}
let content = std::fs::read_to_string(&path)?;
Ok(Some(content))
}
/// Delete a build log
pub fn delete_log(&self, build_id: &Uuid) -> std::io::Result<()> {
let path = self.log_path(build_id);
if path.exists() {
std::fs::remove_file(&path)?;
}
Ok(())
/// Delete a build log
pub fn delete_log(&self, build_id: &Uuid) -> std::io::Result<()> {
let path = self.log_path(build_id);
if path.exists() {
std::fs::remove_file(&path)?;
}
Ok(())
}
}
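
Usage sketch (the directory and module path are assumptions):

    use std::path::PathBuf;

    use fc_common::logs::LogStorage;

    fn main() -> std::io::Result<()> {
      let storage = LogStorage::new(PathBuf::from("/tmp/fc-logs"))?;
      let build_id = uuid::Uuid::new_v4();

      // stdout and stderr land in one <build_id>.log file, separated by a
      // single newline when both are non-empty.
      storage.write_log(&build_id, "building...", "warning: deprecated flag")?;

      if let Some(log) = storage.read_log(&build_id)? {
        assert!(log.contains("deprecated"));
      }
      storage.delete_log(&build_id)
    }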


@@ -5,65 +5,65 @@ use tracing::{error, info, warn};
/// Runs database migrations and ensures the database exists
pub async fn run_migrations(database_url: &str) -> anyhow::Result<()> {
info!("Starting database migrations");
info!("Starting database migrations");
// Check if database exists, create if it doesn't
if !Postgres::database_exists(database_url).await? {
warn!("Database does not exist, creating it");
Postgres::create_database(database_url).await?;
info!("Database created successfully");
}
// Check if database exists, create if it doesn't
if !Postgres::database_exists(database_url).await? {
warn!("Database does not exist, creating it");
Postgres::create_database(database_url).await?;
info!("Database created successfully");
}
// Set up connection pool with retry logic, then run migrations
let pool = create_connection_pool(database_url).await?;
match sqlx::migrate!("./migrations").run(&pool).await {
Ok(()) => {
info!("Database migrations completed successfully");
Ok(())
}
Err(e) => {
error!("Failed to run database migrations: {}", e);
Err(anyhow::anyhow!("Migration failed: {e}"))
}
}
// Set up connection pool with retry logic, then run migrations
let pool = create_connection_pool(database_url).await?;
match sqlx::migrate!("./migrations").run(&pool).await {
Ok(()) => {
info!("Database migrations completed successfully");
Ok(())
},
Err(e) => {
error!("Failed to run database migrations: {}", e);
Err(anyhow::anyhow!("Migration failed: {e}"))
},
}
}
/// Creates a connection pool with proper configuration
async fn create_connection_pool(database_url: &str) -> anyhow::Result<PgPool> {
let pool = PgPool::connect(database_url).await?;
let pool = PgPool::connect(database_url).await?;
// Test the connection
sqlx::query("SELECT 1").fetch_one(&pool).await?;
// Test the connection
sqlx::query("SELECT 1").fetch_one(&pool).await?;
Ok(pool)
Ok(pool)
}
/// Validates that all required tables exist and have the expected structure
pub async fn validate_schema(pool: &PgPool) -> anyhow::Result<()> {
info!("Validating database schema");
info!("Validating database schema");
let required_tables = vec![
"projects",
"jobsets",
"evaluations",
"builds",
"build_products",
"build_steps",
];
let required_tables = vec![
"projects",
"jobsets",
"evaluations",
"builds",
"build_products",
"build_steps",
];
for table in required_tables {
let result = sqlx::query_scalar::<_, i64>(
"SELECT COUNT(*) FROM information_schema.tables WHERE table_name = $1",
)
.bind(table)
.fetch_one(pool)
.await?;
for table in required_tables {
let result = sqlx::query_scalar::<_, i64>(
"SELECT COUNT(*) FROM information_schema.tables WHERE table_name = $1",
)
.bind(table)
.fetch_one(pool)
.await?;
if result == 0 {
return Err(anyhow::anyhow!("Required table '{table}' does not exist"));
}
if result == 0 {
return Err(anyhow::anyhow!("Required table '{table}' does not exist"));
}
}
info!("Database schema validation passed");
Ok(())
info!("Database schema validation passed");
Ok(())
}
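
Startup wiring, sketched (the crate path is an assumption; the URL is the config default, not a real deployment):

    use fc_migrate::{run_migrations, validate_schema};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
      let url = "postgresql://fc_ci:password@localhost/fc_ci";

      // Creates the database if absent, then applies ./migrations in order.
      run_migrations(url).await?;

      // Fails fast if any expected table is missing.
      let pool = sqlx::PgPool::connect(url).await?;
      validate_schema(&pool).await?;
      Ok(())
    }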


@@ -8,78 +8,74 @@ use tracing_subscriber::fmt::init;
#[command(name = "fc-migrate")]
#[command(about = "Database migration utility for FC CI")]
pub struct Cli {
#[command(subcommand)]
pub command: Commands,
#[command(subcommand)]
pub command: Commands,
}
#[derive(Subcommand)]
pub enum Commands {
/// Run all pending migrations
Up {
/// Database connection URL
database_url: String,
},
/// Validate the current schema
Validate {
/// Database connection URL
database_url: String,
},
/// Create a new migration file
Create {
/// Migration name
#[arg(required = true)]
name: String,
},
/// Run all pending migrations
Up {
/// Database connection URL
database_url: String,
},
/// Validate the current schema
Validate {
/// Database connection URL
database_url: String,
},
/// Create a new migration file
Create {
/// Migration name
#[arg(required = true)]
name: String,
},
}
pub async fn run() -> anyhow::Result<()> {
let cli = Cli::parse();
let cli = Cli::parse();
// Initialize logging
init();
// Initialize logging
init();
match cli.command {
Commands::Up { database_url } => {
info!("Running database migrations");
crate::run_migrations(&database_url).await?;
info!("Migrations completed successfully");
}
Commands::Validate { database_url } => {
info!("Validating database schema");
let pool = sqlx::PgPool::connect(&database_url).await?;
crate::validate_schema(&pool).await?;
info!("Schema validation passed");
}
Commands::Create { name } => {
create_migration(&name)?;
}
}
match cli.command {
Commands::Up { database_url } => {
info!("Running database migrations");
crate::run_migrations(&database_url).await?;
info!("Migrations completed successfully");
},
Commands::Validate { database_url } => {
info!("Validating database schema");
let pool = sqlx::PgPool::connect(&database_url).await?;
crate::validate_schema(&pool).await?;
info!("Schema validation passed");
},
Commands::Create { name } => {
create_migration(&name)?;
},
}
Ok(())
Ok(())
}
fn create_migration(name: &str) -> anyhow::Result<()> {
use chrono::Utc;
use std::fs;
use std::fs;
let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
let filename = format!("{timestamp}_{name}.sql");
let filepath = format!("crates/common/migrations/{filename}");
use chrono::Utc;
let content = format!(
"-- Migration: {}\n\
-- Created: {}\n\
\n\
-- Add your migration SQL here\n\
\n\
-- Uncomment below for rollback SQL\n\
-- ROLLBACK;\n",
name,
Utc::now().to_rfc3339()
);
let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
let filename = format!("{timestamp}_{name}.sql");
let filepath = format!("crates/common/migrations/{filename}");
fs::write(&filepath, content)?;
println!("Created migration file: {filepath}");
let content = format!(
"-- Migration: {}\n-- Created: {}\n\n-- Add your migration SQL here\n\n-- \
Uncomment below for rollback SQL\n-- ROLLBACK;\n",
name,
Utc::now().to_rfc3339()
);
Ok(())
fs::write(&filepath, content)?;
println!("Created migration file: {filepath}");
Ok(())
}
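
For reference, running fc-migrate create add_users_table writes crates/common/migrations/<timestamp>_add_users_table.sql containing exactly the scaffold the format string above produces (timestamp made up):

    -- Migration: add_users_table
    -- Created: 2026-02-02T02:23:50+03:00

    -- Add your migration SQL here

    -- Uncomment below for rollback SQL
    -- ROLLBACK;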


@@ -7,395 +7,395 @@ use uuid::Uuid;
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Project {
pub id: Uuid,
pub name: String,
pub description: Option<String>,
pub repository_url: String,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
pub id: Uuid,
pub name: String,
pub description: Option<String>,
pub repository_url: String,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Jobset {
pub id: Uuid,
pub project_id: Uuid,
pub name: String,
pub nix_expression: String,
pub enabled: bool,
pub flake_mode: bool,
pub check_interval: i32,
pub branch: Option<String>,
pub scheduling_shares: i32,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
pub id: Uuid,
pub project_id: Uuid,
pub name: String,
pub nix_expression: String,
pub enabled: bool,
pub flake_mode: bool,
pub check_interval: i32,
pub branch: Option<String>,
pub scheduling_shares: i32,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Evaluation {
pub id: Uuid,
pub jobset_id: Uuid,
pub commit_hash: String,
pub evaluation_time: DateTime<Utc>,
pub status: EvaluationStatus,
pub error_message: Option<String>,
pub inputs_hash: Option<String>,
pub id: Uuid,
pub jobset_id: Uuid,
pub commit_hash: String,
pub evaluation_time: DateTime<Utc>,
pub status: EvaluationStatus,
pub error_message: Option<String>,
pub inputs_hash: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, sqlx::Type)]
#[sqlx(type_name = "text", rename_all = "lowercase")]
pub enum EvaluationStatus {
Pending,
Running,
Completed,
Failed,
Pending,
Running,
Completed,
Failed,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Build {
pub id: Uuid,
pub evaluation_id: Uuid,
pub job_name: String,
pub drv_path: String,
pub status: BuildStatus,
pub started_at: Option<DateTime<Utc>>,
pub completed_at: Option<DateTime<Utc>>,
pub log_path: Option<String>,
pub build_output_path: Option<String>,
pub error_message: Option<String>,
pub system: Option<String>,
pub priority: i32,
pub retry_count: i32,
pub max_retries: i32,
pub notification_pending_since: Option<DateTime<Utc>>,
pub log_url: Option<String>,
pub created_at: DateTime<Utc>,
pub outputs: Option<serde_json::Value>,
pub is_aggregate: bool,
pub constituents: Option<serde_json::Value>,
pub builder_id: Option<Uuid>,
pub signed: bool,
pub id: Uuid,
pub evaluation_id: Uuid,
pub job_name: String,
pub drv_path: String,
pub status: BuildStatus,
pub started_at: Option<DateTime<Utc>>,
pub completed_at: Option<DateTime<Utc>>,
pub log_path: Option<String>,
pub build_output_path: Option<String>,
pub error_message: Option<String>,
pub system: Option<String>,
pub priority: i32,
pub retry_count: i32,
pub max_retries: i32,
pub notification_pending_since: Option<DateTime<Utc>>,
pub log_url: Option<String>,
pub created_at: DateTime<Utc>,
pub outputs: Option<serde_json::Value>,
pub is_aggregate: bool,
pub constituents: Option<serde_json::Value>,
pub builder_id: Option<Uuid>,
pub signed: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::Type, PartialEq)]
#[sqlx(type_name = "text", rename_all = "lowercase")]
pub enum BuildStatus {
Pending,
Running,
Completed,
Failed,
Cancelled,
Pending,
Running,
Completed,
Failed,
Cancelled,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct BuildProduct {
pub id: Uuid,
pub build_id: Uuid,
pub name: String,
pub path: String,
pub sha256_hash: Option<String>,
pub file_size: Option<i64>,
pub content_type: Option<String>,
pub is_directory: bool,
pub gc_root_path: Option<String>,
pub created_at: DateTime<Utc>,
pub id: Uuid,
pub build_id: Uuid,
pub name: String,
pub path: String,
pub sha256_hash: Option<String>,
pub file_size: Option<i64>,
pub content_type: Option<String>,
pub is_directory: bool,
pub gc_root_path: Option<String>,
pub created_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct BuildStep {
pub id: Uuid,
pub build_id: Uuid,
pub step_number: i32,
pub command: String,
pub output: Option<String>,
pub error_output: Option<String>,
pub started_at: DateTime<Utc>,
pub completed_at: Option<DateTime<Utc>>,
pub exit_code: Option<i32>,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct BuildDependency {
pub id: Uuid,
pub build_id: Uuid,
pub dependency_build_id: Uuid,
}
/// Active jobset view — enabled jobsets joined with project info.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct ActiveJobset {
pub id: Uuid,
pub project_id: Uuid,
pub name: String,
pub nix_expression: String,
pub enabled: bool,
pub flake_mode: bool,
pub check_interval: i32,
pub branch: Option<String>,
pub scheduling_shares: i32,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
pub project_name: String,
pub repository_url: String,
}
/// Build statistics from the build_stats view.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow, Default)]
pub struct BuildStats {
pub total_builds: Option<i64>,
pub completed_builds: Option<i64>,
pub failed_builds: Option<i64>,
pub running_builds: Option<i64>,
pub pending_builds: Option<i64>,
pub avg_duration_seconds: Option<f64>,
}
/// API key for authentication.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct ApiKey {
pub id: Uuid,
pub name: String,
pub key_hash: String,
pub role: String,
pub created_at: DateTime<Utc>,
pub last_used_at: Option<DateTime<Utc>>,
}
/// Webhook configuration for a project.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct WebhookConfig {
pub id: Uuid,
pub project_id: Uuid,
pub forge_type: String,
pub secret_hash: Option<String>,
pub enabled: bool,
pub created_at: DateTime<Utc>,
}
/// Notification configuration for a project.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct NotificationConfig {
pub id: Uuid,
pub project_id: Uuid,
pub notification_type: String,
pub config: serde_json::Value,
pub enabled: bool,
pub created_at: DateTime<Utc>,
}
/// Jobset input definition.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct JobsetInput {
pub id: Uuid,
pub jobset_id: Uuid,
pub name: String,
pub input_type: String,
pub value: String,
pub revision: Option<String>,
pub created_at: DateTime<Utc>,
}
/// Release channel — tracks the latest "good" evaluation for a jobset.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Channel {
pub id: Uuid,
pub project_id: Uuid,
pub name: String,
pub jobset_id: Uuid,
pub current_evaluation_id: Option<Uuid>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
/// Remote builder for multi-machine / multi-arch builds.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct RemoteBuilder {
pub id: Uuid,
pub name: String,
pub ssh_uri: String,
pub systems: Vec<String>,
pub max_jobs: i32,
pub speed_factor: i32,
pub supported_features: Vec<String>,
pub mandatory_features: Vec<String>,
pub enabled: bool,
pub public_host_key: Option<String>,
pub ssh_key_file: Option<String>,
pub created_at: DateTime<Utc>,
}
// --- Pagination ---
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaginationParams {
pub limit: Option<i64>,
pub offset: Option<i64>,
}
impl PaginationParams {
pub fn limit(&self) -> i64 {
self.limit.unwrap_or(50).min(200).max(1)
}
pub fn offset(&self) -> i64 {
self.offset.unwrap_or(0).max(0)
}
}
impl Default for PaginationParams {
fn default() -> Self {
Self {
limit: Some(50),
offset: Some(0),
}
}
}
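// A minimal usage sketch (hypothetical values): out-of-range query
// parameters are clamped before they ever reach SQL, so LIMIT/OFFSET stay
// within 1..=200 and >= 0 respectively:
//
//   let p = PaginationParams { limit: Some(1000), offset: Some(-5) };
//   assert_eq!(p.limit(), 200); // capped at the 200-row maximum
//   assert_eq!(p.offset(), 0); // negative offsets floor to zero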
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaginatedResponse<T> {
pub items: Vec<T>,
pub total: i64,
pub limit: i64,
pub offset: i64,
}
// --- DTO structs for creation and updates ---
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateProject {
pub name: String,
pub description: Option<String>,
pub repository_url: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateProject {
pub name: Option<String>,
pub description: Option<String>,
pub repository_url: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateJobset {
pub project_id: Uuid,
pub name: String,
pub nix_expression: String,
pub enabled: Option<bool>,
pub flake_mode: Option<bool>,
pub check_interval: Option<i32>,
pub branch: Option<String>,
pub scheduling_shares: Option<i32>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateJobset {
pub name: Option<String>,
pub nix_expression: Option<String>,
pub enabled: Option<bool>,
pub flake_mode: Option<bool>,
pub check_interval: Option<i32>,
pub branch: Option<String>,
pub scheduling_shares: Option<i32>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateEvaluation {
pub jobset_id: Uuid,
pub commit_hash: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateBuild {
pub evaluation_id: Uuid,
pub job_name: String,
pub drv_path: String,
pub system: Option<String>,
pub outputs: Option<serde_json::Value>,
pub is_aggregate: Option<bool>,
pub constituents: Option<serde_json::Value>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateBuildProduct {
pub build_id: Uuid,
pub name: String,
pub path: String,
pub sha256_hash: Option<String>,
pub file_size: Option<i64>,
pub content_type: Option<String>,
pub is_directory: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateBuildStep {
pub build_id: Uuid,
pub step_number: i32,
pub command: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateWebhookConfig {
pub project_id: Uuid,
pub forge_type: String,
pub secret: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateNotificationConfig {
pub project_id: Uuid,
pub notification_type: String,
pub config: serde_json::Value,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateChannel {
pub project_id: Uuid,
pub name: String,
pub jobset_id: Uuid,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateChannel {
pub name: Option<String>,
pub jobset_id: Option<Uuid>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateRemoteBuilder {
pub name: String,
pub ssh_uri: String,
pub systems: Vec<String>,
pub max_jobs: Option<i32>,
pub speed_factor: Option<i32>,
pub supported_features: Option<Vec<String>>,
pub mandatory_features: Option<Vec<String>>,
pub public_host_key: Option<String>,
pub ssh_key_file: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateRemoteBuilder {
pub name: Option<String>,
pub ssh_uri: Option<String>,
pub systems: Option<Vec<String>>,
pub max_jobs: Option<i32>,
pub speed_factor: Option<i32>,
pub supported_features: Option<Vec<String>>,
pub mandatory_features: Option<Vec<String>>,
pub enabled: Option<bool>,
pub public_host_key: Option<String>,
pub ssh_key_file: Option<String>,
}
/// Summary of system status for the admin API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemStatus {
pub projects_count: i64,
pub jobsets_count: i64,
pub evaluations_count: i64,
pub builds_pending: i64,
pub builds_running: i64,
pub builds_completed: i64,
pub builds_failed: i64,
pub remote_builders: i64,
pub channels_count: i64,
}
@@ -2,41 +2,40 @@
use serde::{Deserialize, Serialize};
use crate::{CiError, error::Result};
/// Result of probing a flake repository.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlakeProbeResult {
pub is_flake: bool,
pub outputs: Vec<FlakeOutput>,
pub suggested_jobsets: Vec<SuggestedJobset>,
pub metadata: FlakeMetadata,
pub error: Option<String>,
}
/// A discovered flake output attribute.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlakeOutput {
pub path: String,
pub output_type: String,
pub systems: Vec<String>,
}
/// A suggested jobset configuration based on discovered outputs.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SuggestedJobset {
pub name: String,
pub nix_expression: String,
pub description: String,
pub priority: u8,
}
/// Metadata extracted from the flake.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FlakeMetadata {
pub description: Option<String>,
pub url: Option<String>,
}
/// Maximum output size we'll parse from `nix flake show --json` (10 MB).
@@ -49,383 +48,405 @@
const MAX_OUTPUT_SIZE: usize = 10 * 1024 * 1024;
/// `git+` prefix so nix clones via git rather than trying to unpack an
/// archive. URLs that are already valid flake refs are returned as-is.
fn to_flake_ref(url: &str) -> String {
  let url_trimmed = url.trim().trim_end_matches('/');

  // Already a flake ref (github:, gitlab:, git+, path:, sourcehut:, etc.)
  if url_trimmed.contains(':')
    && !url_trimmed.starts_with("http://")
    && !url_trimmed.starts_with("https://")
  {
    return url_trimmed.to_string();
  }

  // Extract host + path from HTTP(S) URLs
  let without_scheme = url_trimmed
    .strip_prefix("https://")
    .or_else(|| url_trimmed.strip_prefix("http://"))
    .unwrap_or(url_trimmed);
  let without_dotgit = without_scheme.trim_end_matches(".git");

  // github.com/owner/repo → github:owner/repo
  if let Some(path) = without_dotgit.strip_prefix("github.com/") {
    return format!("github:{path}");
  }

  // gitlab.com/owner/repo → gitlab:owner/repo
  if let Some(path) = without_dotgit.strip_prefix("gitlab.com/") {
    return format!("gitlab:{path}");
  }

  // Any other HTTPS/HTTP URL: prefix with git+ so nix clones it
  if url_trimmed.starts_with("https://") || url_trimmed.starts_with("http://") {
    return format!("git+{url_trimmed}");
  }

  url_trimmed.to_string()
}
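// Note: the bare-ref check runs before any host matching, so existing flake
// refs like `github:owner/repo` or `git+ssh://...` pass through untouched;
// only plain HTTP(S) URLs are rewritten.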
/// Probe a flake repository to discover its outputs and suggest jobsets.
pub async fn probe_flake(
  repo_url: &str,
  revision: Option<&str>,
) -> Result<FlakeProbeResult> {
  let base_ref = to_flake_ref(repo_url);
  let flake_ref = if let Some(rev) = revision {
    format!("{base_ref}?rev={rev}")
  } else {
    base_ref
  };

  let output =
    tokio::time::timeout(std::time::Duration::from_secs(60), async {
      tokio::process::Command::new("nix")
        .args([
          "--extra-experimental-features",
          "nix-command flakes",
          "flake",
          "show",
          "--json",
          "--no-write-lock-file",
          &flake_ref,
        ])
        .output()
        .await
    })
    .await
    .map_err(|_| {
      CiError::Timeout("Flake probe timed out after 60s".to_string())
    })?
    .map_err(|e| {
      CiError::NixEval(format!("Failed to run nix flake show: {e}"))
    })?;

  if !output.status.success() {
    let stderr = String::from_utf8_lossy(&output.stderr);

    // Check for common non-flake case
    if stderr.contains("does not provide attribute")
      || stderr.contains("has no 'flake.nix'")
    {
      return Ok(FlakeProbeResult {
        is_flake: false,
        outputs: Vec::new(),
        suggested_jobsets: Vec::new(),
        metadata: FlakeMetadata::default(),
        error: Some(
          "Repository does not contain a flake.nix".to_string(),
        ),
      });
    }

    if stderr.contains("denied")
      || stderr.contains("not accessible")
      || stderr.contains("authentication")
    {
      return Err(CiError::NixEval(
        "Repository not accessible. Check URL and permissions.".to_string(),
      ));
    }

    return Err(CiError::NixEval(format!("nix flake show failed: {stderr}")));
  }

  let stdout = String::from_utf8_lossy(&output.stdout);
  if stdout.len() > MAX_OUTPUT_SIZE {
    // For huge repos like nixpkgs, we still parse but only top-level
    tracing::warn!(
      "Flake show output exceeds {}MB, parsing top-level only",
      MAX_OUTPUT_SIZE / (1024 * 1024)
    );
  }

  let raw: serde_json::Value =
    serde_json::from_str(&stdout[..stdout.len().min(MAX_OUTPUT_SIZE)])
      .map_err(|e| {
        CiError::NixEval(format!("Failed to parse flake show output: {e}"))
      })?;

  let top = match raw.as_object() {
    Some(obj) => obj,
    None => {
      return Err(CiError::NixEval(
        "Unexpected flake show output format".to_string(),
      ));
    },
  };

  let mut outputs = Vec::new();
  let mut suggested_jobsets = Vec::new();

  // Known output types and their detection
  let output_types: &[(&str, &str, &str, u8)] = &[
    ("hydraJobs", "derivation", "CI Jobs (hydraJobs)", 10),
    ("checks", "derivation", "Checks", 7),
    ("packages", "derivation", "Packages", 6),
    ("devShells", "derivation", "Development Shells", 3),
    (
      "nixosConfigurations",
      "configuration",
      "NixOS Configurations",
      4,
    ),
    ("nixosModules", "module", "NixOS Modules", 2),
    ("overlays", "overlay", "Overlays", 1),
    (
      "legacyPackages",
      "derivation",
      "Legacy Packages (nixpkgs-style)",
      5,
    ),
  ];

  for &(key, output_type, description, priority) in output_types {
    if let Some(val) = top.get(key) {
      let systems = extract_systems(val);
      outputs.push(FlakeOutput {
        path: key.to_string(),
        output_type: output_type.to_string(),
        systems: systems.clone(),
      });

      // Generate suggested jobset
      let nix_expression = match key {
        "hydraJobs" => "hydraJobs".to_string(),
        "checks" => "checks".to_string(),
        "packages" => "packages".to_string(),
        "devShells" => "devShells".to_string(),
        "legacyPackages" => "legacyPackages".to_string(),
        _ => continue, // Don't suggest jobsets for non-buildable outputs
      };
      suggested_jobsets.push(SuggestedJobset {
        name: key.to_string(),
        nix_expression,
        description: description.to_string(),
        priority,
      });
    }
  }

  // Sort jobsets by priority (highest first)
  suggested_jobsets.sort_by(|a, b| b.priority.cmp(&a.priority));

  // Extract metadata from the flake
  let metadata = FlakeMetadata {
    description: top
      .get("description")
      .and_then(|v| v.as_str())
      .map(|s| s.to_string()),
    url: Some(repo_url.to_string()),
  };

  Ok(FlakeProbeResult {
    is_flake: true,
    outputs,
    suggested_jobsets,
    metadata,
    error: None,
  })
}
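// Usage sketch (hypothetical repository and commit): `probe_flake` first
// normalizes the URL via `to_flake_ref`, then pins the revision with `?rev=`,
// so the call below probes `github:owner/repo?rev=abc123`:
//
//   let probe = probe_flake("https://github.com/owner/repo", Some("abc123")).await?;
//   if probe.is_flake {
//     for jobset in &probe.suggested_jobsets {
//       println!("{} (priority {})", jobset.name, jobset.priority);
//     }
//   }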
/// Extract system names from a flake output value (e.g.,
/// `packages.x86_64-linux`).
pub(crate) fn extract_systems(val: &serde_json::Value) -> Vec<String> {
  let mut systems = Vec::new();
  if let Some(obj) = val.as_object() {
    for key in obj.keys() {
      // System names follow the pattern `arch-os` (e.g., x86_64-linux,
      // aarch64-darwin)
      if key.contains('-') && (key.contains("linux") || key.contains("darwin"))
      {
        systems.push(key.clone());
      }
    }
  }
  systems.sort();
  systems
}
#[cfg(test)]
mod tests {
  use serde_json::json;

  use super::*;

  #[test]
  fn test_extract_systems_typical_flake() {
    let val = json!({
        "x86_64-linux": { "hello": {} },
        "aarch64-linux": { "hello": {} },
        "x86_64-darwin": { "hello": {} }
    });
    let systems = extract_systems(&val);
    assert_eq!(systems, vec![
      "aarch64-linux",
      "x86_64-darwin",
      "x86_64-linux"
    ]);
  }

  #[test]
  fn test_extract_systems_empty_object() {
    let val = json!({});
    assert!(extract_systems(&val).is_empty());
  }

  #[test]
  fn test_extract_systems_non_system_keys_ignored() {
    let val = json!({
        "x86_64-linux": {},
        "default": {},
        "lib": {},
        "overlay": {}
    });
    let systems = extract_systems(&val);
    assert_eq!(systems, vec!["x86_64-linux"]);
  }

  #[test]
  fn test_extract_systems_non_object_value() {
    let val = json!("string");
    assert!(extract_systems(&val).is_empty());

    let val = json!(null);
    assert!(extract_systems(&val).is_empty());
  }

  #[test]
  fn test_flake_probe_result_serialization() {
    let result = FlakeProbeResult {
      is_flake: true,
      outputs: vec![FlakeOutput {
        path: "packages".to_string(),
        output_type: "derivation".to_string(),
        systems: vec!["x86_64-linux".to_string()],
      }],
      suggested_jobsets: vec![SuggestedJobset {
        name: "packages".to_string(),
        nix_expression: "packages".to_string(),
        description: "Packages".to_string(),
        priority: 6,
      }],
      metadata: FlakeMetadata {
        description: Some("A test flake".to_string()),
        url: Some("https://github.com/test/repo".to_string()),
      },
      error: None,
    };

    let json = serde_json::to_string(&result).unwrap();
    let parsed: FlakeProbeResult = serde_json::from_str(&json).unwrap();
    assert!(parsed.is_flake);
    assert_eq!(parsed.outputs.len(), 1);
    assert_eq!(parsed.suggested_jobsets.len(), 1);
    assert_eq!(parsed.suggested_jobsets[0].priority, 6);
    assert_eq!(parsed.metadata.description.as_deref(), Some("A test flake"));
  }

  #[test]
  fn test_flake_probe_result_not_a_flake() {
    let result = FlakeProbeResult {
      is_flake: false,
      outputs: Vec::new(),
      suggested_jobsets: Vec::new(),
      metadata: FlakeMetadata::default(),
      error: Some(
        "Repository does not contain a flake.nix".to_string(),
      ),
    };

    let json = serde_json::to_string(&result).unwrap();
    let parsed: FlakeProbeResult = serde_json::from_str(&json).unwrap();
    assert!(!parsed.is_flake);
    assert!(parsed.error.is_some());
  }

  #[test]
  fn test_to_flake_ref_github_https() {
    assert_eq!(
      to_flake_ref("https://github.com/notashelf/rags"),
      "github:notashelf/rags"
    );
    assert_eq!(
      to_flake_ref("https://github.com/NixOS/nixpkgs"),
      "github:NixOS/nixpkgs"
    );
    assert_eq!(
      to_flake_ref("https://github.com/owner/repo.git"),
      "github:owner/repo"
    );
    assert_eq!(
      to_flake_ref("http://github.com/owner/repo"),
      "github:owner/repo"
    );
    assert_eq!(
      to_flake_ref("https://github.com/owner/repo/"),
      "github:owner/repo"
    );
  }

  #[test]
  fn test_to_flake_ref_gitlab_https() {
    assert_eq!(
      to_flake_ref("https://gitlab.com/owner/repo"),
      "gitlab:owner/repo"
    );
    assert_eq!(
      to_flake_ref("https://gitlab.com/group/subgroup/repo.git"),
      "gitlab:group/subgroup/repo"
    );
  }

  #[test]
  fn test_to_flake_ref_already_flake_ref() {
    assert_eq!(to_flake_ref("github:owner/repo"), "github:owner/repo");
    assert_eq!(to_flake_ref("gitlab:owner/repo"), "gitlab:owner/repo");
    assert_eq!(
      to_flake_ref("git+https://example.com/repo.git"),
      "git+https://example.com/repo.git"
    );
    assert_eq!(
      to_flake_ref("path:/some/local/path"),
      "path:/some/local/path"
    );
    assert_eq!(to_flake_ref("sourcehut:~user/repo"), "sourcehut:~user/repo");
  }

  #[test]
  fn test_to_flake_ref_other_https() {
    assert_eq!(
      to_flake_ref("https://codeberg.org/owner/repo"),
      "git+https://codeberg.org/owner/repo"
    );
    assert_eq!(
      to_flake_ref("https://sr.ht/~user/repo"),
      "git+https://sr.ht/~user/repo"
    );
  }

  #[test]
  fn test_suggested_jobset_ordering() {
    let mut jobsets = vec![
      SuggestedJobset {
        name: "packages".to_string(),
        nix_expression: "packages".to_string(),
        description: "Packages".to_string(),
        priority: 6,
      },
      SuggestedJobset {
        name: "hydraJobs".to_string(),
        nix_expression: "hydraJobs".to_string(),
        description: "CI Jobs".to_string(),
        priority: 10,
      },
      SuggestedJobset {
        name: "checks".to_string(),
        nix_expression: "checks".to_string(),
        description: "Checks".to_string(),
        priority: 7,
      },
    ];

    jobsets.sort_by(|a, b| b.priority.cmp(&a.priority));
    assert_eq!(jobsets[0].name, "hydraJobs");
    assert_eq!(jobsets[1].name, "checks");
    assert_eq!(jobsets[2].name, "packages");
  }
}
@@ -1,294 +1,313 @@
//! Notification dispatch for build events
use tracing::{error, info, warn};
use crate::{
config::{EmailConfig, NotificationsConfig},
models::{Build, BuildStatus, Project},
};
/// Dispatch all configured notifications for a completed build.
pub async fn dispatch_build_finished(
  build: &Build,
  project: &Project,
  commit_hash: &str,
  config: &NotificationsConfig,
) {
  // 1. Run command notification
  if let Some(ref cmd) = config.run_command {
    run_command_notification(cmd, build, project).await;
  }

  // 2. GitHub commit status
  if let Some(ref token) = config.github_token
    && project.repository_url.contains("github.com")
  {
    set_github_status(token, &project.repository_url, commit_hash, build).await;
  }

  // 3. Gitea/Forgejo commit status
  if let (Some(url), Some(token)) = (&config.gitea_url, &config.gitea_token) {
    set_gitea_status(url, token, &project.repository_url, commit_hash, build)
      .await;
  }

  // 4. Email notification
  if let Some(ref email_config) = config.email
    && (!email_config.on_failure_only || build.status == BuildStatus::Failed)
  {
    send_email_notification(email_config, build, project).await;
  }
}
async fn run_command_notification(cmd: &str, build: &Build, project: &Project) {
  let status_str = match build.status {
    BuildStatus::Completed => "success",
    BuildStatus::Failed => "failure",
    BuildStatus::Cancelled => "cancelled",
    _ => "unknown",
  };

  let result = tokio::process::Command::new("sh")
    .arg("-c")
    .arg(cmd)
    .env("FC_BUILD_ID", build.id.to_string())
    .env("FC_BUILD_STATUS", status_str)
    .env("FC_BUILD_JOB", &build.job_name)
    .env("FC_BUILD_DRV", &build.drv_path)
    .env("FC_PROJECT_NAME", &project.name)
    .env("FC_PROJECT_URL", &project.repository_url)
    .env(
      "FC_BUILD_OUTPUT",
      build.build_output_path.as_deref().unwrap_or(""),
    )
    .output()
    .await;

  match result {
    Ok(output) => {
      if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        warn!(build_id = %build.id, "RunCommand failed: {stderr}");
      } else {
        info!(build_id = %build.id, "RunCommand completed successfully");
      }
    },
    Err(e) => error!(build_id = %build.id, "RunCommand execution failed: {e}"),
  }
}
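// Example hook (hypothetical config value; `$WEBHOOK` is assumed to be set in
// the server environment). Any executable works, since the configured string
// runs under `sh -c` and the FC_* variables above are the whole contract:
//
//   run_command = "curl -fsS -X POST \"$WEBHOOK\" -d \"$FC_PROJECT_NAME/$FC_BUILD_JOB: $FC_BUILD_STATUS\""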
async fn set_github_status(
  token: &str,
  repo_url: &str,
  commit: &str,
  build: &Build,
) {
  // Parse owner/repo from URL
  let (owner, repo) = match parse_github_repo(repo_url) {
    Some(v) => v,
    None => {
      warn!("Cannot parse GitHub owner/repo from {repo_url}");
      return;
    },
  };

  let (state, description) = match build.status {
    BuildStatus::Completed => ("success", "Build succeeded"),
    BuildStatus::Failed => ("failure", "Build failed"),
    BuildStatus::Running => ("pending", "Build in progress"),
    BuildStatus::Pending => ("pending", "Build queued"),
    BuildStatus::Cancelled => ("error", "Build cancelled"),
  };

  let url =
    format!("https://api.github.com/repos/{owner}/{repo}/statuses/{commit}");
  let body = serde_json::json!({
      "state": state,
      "description": description,
      "context": format!("fc/{}", build.job_name),
  });

  let client = reqwest::Client::new();
  match client
    .post(&url)
    .header("Authorization", format!("token {token}"))
    .header("User-Agent", "fc-ci")
    .header("Accept", "application/vnd.github+json")
    .json(&body)
    .send()
    .await
  {
    Ok(resp) => {
      if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        warn!("GitHub status API returned {status}: {text}");
      } else {
        info!(build_id = %build.id, "Set GitHub commit status: {state}");
      }
    },
    Err(e) => error!("GitHub status API request failed: {e}"),
  }
}
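// Note: GitHub's commit status API only accepts the four states
// success/failure/pending/error, which is why `Cancelled` is mapped to
// "error" in the match above rather than getting its own state.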
async fn set_gitea_status(
  base_url: &str,
  token: &str,
  repo_url: &str,
  commit: &str,
  build: &Build,
) {
  // Parse owner/repo from URL (try to extract from the gitea URL)
  let (owner, repo) = match parse_gitea_repo(repo_url, base_url) {
    Some(v) => v,
    None => {
      warn!("Cannot parse Gitea owner/repo from {repo_url}");
      return;
    },
  };

  let (state, description) = match build.status {
    BuildStatus::Completed => ("success", "Build succeeded"),
    BuildStatus::Failed => ("failure", "Build failed"),
    BuildStatus::Running => ("pending", "Build in progress"),
    BuildStatus::Pending => ("pending", "Build queued"),
    BuildStatus::Cancelled => ("error", "Build cancelled"),
  };

  let url = format!("{base_url}/api/v1/repos/{owner}/{repo}/statuses/{commit}");
  let body = serde_json::json!({
      "state": state,
      "description": description,
      "context": format!("fc/{}", build.job_name),
  });

  let client = reqwest::Client::new();
  match client
    .post(&url)
    .header("Authorization", format!("token {token}"))
    .json(&body)
    .send()
    .await
  {
    Ok(resp) => {
      if !resp.status().is_success() {
        let status = resp.status();
        let text = resp.text().await.unwrap_or_default();
        warn!("Gitea status API returned {status}: {text}");
      } else {
        info!(build_id = %build.id, "Set Gitea commit status: {state}");
      }
    },
    Err(e) => error!("Gitea status API request failed: {e}"),
  }
}
fn parse_github_repo(url: &str) -> Option<(String, String)> {
  // Handle https://github.com/owner/repo.git or git@github.com:owner/repo.git
  let url = url.trim_end_matches(".git");
  if let Some(rest) = url.strip_prefix("https://github.com/") {
    let parts: Vec<&str> = rest.splitn(2, '/').collect();
    if parts.len() == 2 {
      return Some((parts[0].to_string(), parts[1].to_string()));
    }
  }
  if let Some(rest) = url.strip_prefix("git@github.com:") {
    let parts: Vec<&str> = rest.splitn(2, '/').collect();
    if parts.len() == 2 {
      return Some((parts[0].to_string(), parts[1].to_string()));
    }
  }
  None
}
fn parse_gitea_repo(
  repo_url: &str,
  base_url: &str,
) -> Option<(String, String)> {
  let url = repo_url.trim_end_matches(".git");
  let base = base_url.trim_end_matches('/');
  if let Some(rest) = url.strip_prefix(&format!("{base}/")) {
    let parts: Vec<&str> = rest.splitn(2, '/').collect();
    if parts.len() == 2 {
      return Some((parts[0].to_string(), parts[1].to_string()));
    }
  }
  None
}
async fn send_email_notification(
  config: &EmailConfig,
  build: &Build,
  project: &Project,
) {
  use lettre::{
    AsyncSmtpTransport,
    AsyncTransport,
    Message,
    Tokio1Executor,
    message::header::ContentType,
    transport::smtp::authentication::Credentials,
  };

  let status_str = match build.status {
    BuildStatus::Completed => "SUCCESS",
    BuildStatus::Failed => "FAILURE",
    BuildStatus::Cancelled => "CANCELLED",
    _ => "UNKNOWN",
  };

  let subject = format!(
    "[FC] {} - {} ({})",
    status_str, build.job_name, project.name
  );
  let body = format!(
    "Build notification from FC CI\n\nProject: {}\nJob: {}\nStatus: \
     {}\nDerivation: {}\nOutput: {}\nBuild ID: {}\n",
    project.name,
    build.job_name,
    status_str,
    build.drv_path,
    build.build_output_path.as_deref().unwrap_or("N/A"),
    build.id,
  );

  for to_addr in &config.to_addresses {
    let email = match Message::builder()
      .from(match config.from_address.parse() {
        Ok(addr) => addr,
        Err(e) => {
          error!("Invalid from address '{}': {e}", config.from_address);
          return;
        },
      })
      .to(match to_addr.parse() {
        Ok(addr) => addr,
        Err(e) => {
          warn!("Invalid to address '{to_addr}': {e}");
          continue;
        },
      })
      .subject(&subject)
      .header(ContentType::TEXT_PLAIN)
      .body(body.clone())
    {
      Ok(e) => e,
      Err(e) => {
        error!("Failed to build email: {e}");
        continue;
      },
    };

    let mut mailer_builder = if config.tls {
      match AsyncSmtpTransport::<Tokio1Executor>::relay(&config.smtp_host) {
        Ok(b) => b.port(config.smtp_port),
        Err(e) => {
          error!("Failed to create SMTP transport: {e}");
          return;
        },
      }
    } else {
      AsyncSmtpTransport::<Tokio1Executor>::builder_dangerous(&config.smtp_host)
        .port(config.smtp_port)
    };

    if let (Some(user), Some(pass)) = (&config.smtp_user, &config.smtp_password)
    {
      mailer_builder = mailer_builder
        .credentials(Credentials::new(user.clone(), pass.clone()));
    }

    let mailer = mailer_builder.build();
    match mailer.send(email).await {
      Ok(_) => {
        info!(build_id = %build.id, to = to_addr, "Email notification sent");
      },
      Err(e) => {
        error!(build_id = %build.id, to = to_addr, "Failed to send email: {e}");
      },
    }
  }
}
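// Note: when `tls` is false the transport is built with `builder_dangerous`,
// i.e. a plaintext SMTP connection; credentials are still attached if
// configured, so that path should only be used on trusted networks.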
@@ -1,73 +1,89 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::{
error::{CiError, Result},
models::ApiKey,
};
pub async fn create(
  pool: &PgPool,
  name: &str,
  key_hash: &str,
  role: &str,
) -> Result<ApiKey> {
  sqlx::query_as::<_, ApiKey>(
    "INSERT INTO api_keys (name, key_hash, role) VALUES ($1, $2, $3) \
     RETURNING *",
  )
  .bind(name)
  .bind(key_hash)
  .bind(role)
  .fetch_one(pool)
  .await
  .map_err(|e| {
    match &e {
      sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
        CiError::Conflict("API key with this hash already exists".to_string())
      },
      _ => CiError::Database(e),
    }
  })
}
pub async fn upsert(
  pool: &PgPool,
  name: &str,
  key_hash: &str,
  role: &str,
) -> Result<ApiKey> {
  sqlx::query_as::<_, ApiKey>(
    "INSERT INTO api_keys (name, key_hash, role) VALUES ($1, $2, $3) ON \
     CONFLICT (key_hash) DO UPDATE SET name = EXCLUDED.name, role = \
     EXCLUDED.role RETURNING *",
  )
  .bind(name)
  .bind(key_hash)
  .bind(role)
  .fetch_one(pool)
  .await
  .map_err(CiError::Database)
}

pub async fn get_by_hash(
  pool: &PgPool,
  key_hash: &str,
) -> Result<Option<ApiKey>> {
  sqlx::query_as::<_, ApiKey>("SELECT * FROM api_keys WHERE key_hash = $1")
    .bind(key_hash)
    .fetch_optional(pool)
    .await
    .map_err(CiError::Database)
}
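// Design note: `upsert` conflicts on `key_hash`, so re-registering an
// existing secret updates its name and role in place instead of failing with
// the unique-violation `Conflict` that plain `create` returns.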
pub async fn list(pool: &PgPool) -> Result<Vec<ApiKey>> {
sqlx::query_as::<_, ApiKey>("SELECT * FROM api_keys ORDER BY created_at DESC")
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM api_keys WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("API key {id} not found")));
}
Ok(())
}
pub async fn touch_last_used(pool: &PgPool, id: Uuid) -> Result<()> {
sqlx::query("UPDATE api_keys SET last_used_at = NOW() WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
}
@@ -1,79 +1,92 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::{
error::{CiError, Result},
models::BuildDependency,
};
pub async fn create(
  pool: &PgPool,
  build_id: Uuid,
  dependency_build_id: Uuid,
) -> Result<BuildDependency> {
  sqlx::query_as::<_, BuildDependency>(
    "INSERT INTO build_dependencies (build_id, dependency_build_id) VALUES \
     ($1, $2) RETURNING *",
  )
  .bind(build_id)
  .bind(dependency_build_id)
  .fetch_one(pool)
  .await
  .map_err(|e| {
    match &e {
      sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
        CiError::Conflict(format!(
          "Dependency from {build_id} to {dependency_build_id} already exists"
        ))
      },
      _ => CiError::Database(e),
    }
  })
}

pub async fn list_for_build(
  pool: &PgPool,
  build_id: Uuid,
) -> Result<Vec<BuildDependency>> {
  sqlx::query_as::<_, BuildDependency>(
    "SELECT * FROM build_dependencies WHERE build_id = $1",
  )
  .bind(build_id)
  .fetch_all(pool)
  .await
  .map_err(CiError::Database)
}

/// Batch check if all dependency builds are completed for multiple builds at
/// once. Returns a map from build_id to whether all deps are completed.
pub async fn check_deps_for_builds(
  pool: &PgPool,
  build_ids: &[Uuid],
) -> Result<std::collections::HashMap<Uuid, bool>> {
  if build_ids.is_empty() {
    return Ok(std::collections::HashMap::new());
  }

  // Find build_ids that have incomplete deps
  let rows: Vec<(Uuid,)> = sqlx::query_as(
    "SELECT DISTINCT bd.build_id FROM build_dependencies bd JOIN builds b ON \
     bd.dependency_build_id = b.id WHERE bd.build_id = ANY($1) AND b.status \
     != 'completed'",
  )
  .bind(build_ids)
  .fetch_all(pool)
  .await
  .map_err(CiError::Database)?;

  let incomplete: std::collections::HashSet<Uuid> =
    rows.into_iter().map(|(id,)| id).collect();

  Ok(
    build_ids
      .iter()
      .map(|id| (*id, !incomplete.contains(id)))
      .collect(),
  )
}

/// Check if all dependency builds for a given build are completed.
pub async fn all_deps_completed(pool: &PgPool, build_id: Uuid) -> Result<bool> {
  let row: (i64,) = sqlx::query_as(
    "SELECT COUNT(*) FROM build_dependencies bd JOIN builds b ON \
     bd.dependency_build_id = b.id WHERE bd.build_id = $1 AND b.status != \
     'completed'",
  )
  .bind(build_id)
  .fetch_one(pool)
  .await
  .map_err(CiError::Database)?;

  Ok(row.0 == 0)
}
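// Sketch of the intended batch use (hypothetical scheduler loop): one query
// answers readiness for every pending build instead of calling
// `all_deps_completed` once per build:
//
//   let ready = check_deps_for_builds(&pool, &pending_ids).await?;
//   for id in pending_ids.iter().filter(|id| ready[*id]) {
//     // dispatch build `id`
//   }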
@@ -1,40 +1,51 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::{
error::{CiError, Result},
models::{BuildProduct, CreateBuildProduct},
};
pub async fn create(
pool: &PgPool,
input: CreateBuildProduct,
) -> Result<BuildProduct> {
sqlx::query_as::<_, BuildProduct>(
"INSERT INTO build_products (build_id, name, path, sha256_hash, \
file_size, content_type, is_directory) VALUES ($1, $2, $3, $4, $5, $6, \
$7) RETURNING *",
)
.bind(input.build_id)
.bind(&input.name)
.bind(&input.path)
.bind(&input.sha256_hash)
.bind(input.file_size)
.bind(&input.content_type)
.bind(input.is_directory)
.fetch_one(pool)
.await
.map_err(CiError::Database)
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<BuildProduct> {
sqlx::query_as::<_, BuildProduct>(
"SELECT * FROM build_products WHERE id = $1",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Build product {id} not found")))
}
pub async fn list_for_build(
pool: &PgPool,
build_id: Uuid,
) -> Result<Vec<BuildProduct>> {
sqlx::query_as::<_, BuildProduct>(
"SELECT * FROM build_products WHERE build_id = $1 ORDER BY created_at ASC",
)
.bind(build_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
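// Illustrative only (not from this commit): dump the artifacts attached to a
// build. The `name`/`path` fields mirror the columns inserted above.
pub async fn debug_print_artifacts(pool: &PgPool, build_id: Uuid) -> Result<()> {
  for product in list_for_build(pool, build_id).await? {
    println!("{} -> {}", product.name, product.path);
  }
  Ok(())
}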


@@ -1,54 +1,66 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{BuildStep, CreateBuildStep};
use crate::{
error::{CiError, Result},
models::{BuildStep, CreateBuildStep},
};
pub async fn create(pool: &PgPool, input: CreateBuildStep) -> Result<BuildStep> {
sqlx::query_as::<_, BuildStep>(
"INSERT INTO build_steps (build_id, step_number, command) VALUES ($1, $2, $3) RETURNING *",
)
.bind(input.build_id)
.bind(input.step_number)
.bind(&input.command)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Build step {} already exists for this build",
input.step_number
))
}
_ => CiError::Database(e),
})
pub async fn create(
pool: &PgPool,
input: CreateBuildStep,
) -> Result<BuildStep> {
sqlx::query_as::<_, BuildStep>(
"INSERT INTO build_steps (build_id, step_number, command) VALUES ($1, $2, \
$3) RETURNING *",
)
.bind(input.build_id)
.bind(input.step_number)
.bind(&input.command)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Build step {} already exists for this build",
input.step_number
))
},
_ => CiError::Database(e),
}
})
}
pub async fn complete(
pool: &PgPool,
id: Uuid,
exit_code: i32,
output: Option<&str>,
error_output: Option<&str>,
pool: &PgPool,
id: Uuid,
exit_code: i32,
output: Option<&str>,
error_output: Option<&str>,
) -> Result<BuildStep> {
sqlx::query_as::<_, BuildStep>(
"UPDATE build_steps SET completed_at = NOW(), exit_code = $1, output = $2, error_output = $3 WHERE id = $4 RETURNING *",
)
.bind(exit_code)
.bind(output)
.bind(error_output)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Build step {id} not found")))
sqlx::query_as::<_, BuildStep>(
"UPDATE build_steps SET completed_at = NOW(), exit_code = $1, output = \
$2, error_output = $3 WHERE id = $4 RETURNING *",
)
.bind(exit_code)
.bind(output)
.bind(error_output)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Build step {id} not found")))
}
pub async fn list_for_build(pool: &PgPool, build_id: Uuid) -> Result<Vec<BuildStep>> {
sqlx::query_as::<_, BuildStep>(
"SELECT * FROM build_steps WHERE build_id = $1 ORDER BY step_number ASC",
)
.bind(build_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_build(
pool: &PgPool,
build_id: Uuid,
) -> Result<Vec<BuildStep>> {
sqlx::query_as::<_, BuildStep>(
"SELECT * FROM build_steps WHERE build_id = $1 ORDER BY step_number ASC",
)
.bind(build_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
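// Hypothetical call site: persist one step, run it, record the outcome. The
// command string and exit handling are invented, and this assumes
// `CreateBuildStep` carries exactly the three inserted columns.
pub async fn run_one_step(pool: &PgPool, build_id: Uuid) -> Result<()> {
  let step = create(pool, CreateBuildStep {
    build_id,
    step_number: 1,
    command: "nix build".to_string(),
  })
  .await?;
  // ... spawn the command, capture stdout/stderr and the exit code ...
  complete(pool, step.id, 0, Some("build ok"), None).await?;
  Ok(())
}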


@@ -1,316 +1,335 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{Build, BuildStats, BuildStatus, CreateBuild};
use crate::{
error::{CiError, Result},
models::{Build, BuildStats, BuildStatus, CreateBuild},
};
pub async fn create(pool: &PgPool, input: CreateBuild) -> Result<Build> {
let is_aggregate = input.is_aggregate.unwrap_or(false);
sqlx::query_as::<_, Build>(
"INSERT INTO builds (evaluation_id, job_name, drv_path, status, system, outputs, is_aggregate, constituents) \
VALUES ($1, $2, $3, 'pending', $4, $5, $6, $7) RETURNING *",
)
.bind(input.evaluation_id)
.bind(&input.job_name)
.bind(&input.drv_path)
.bind(&input.system)
.bind(&input.outputs)
.bind(is_aggregate)
.bind(&input.constituents)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Build for job '{}' already exists in this evaluation",
input.job_name
))
}
_ => CiError::Database(e),
})
let is_aggregate = input.is_aggregate.unwrap_or(false);
sqlx::query_as::<_, Build>(
"INSERT INTO builds (evaluation_id, job_name, drv_path, status, system, \
outputs, is_aggregate, constituents) VALUES ($1, $2, $3, 'pending', $4, \
$5, $6, $7) RETURNING *",
)
.bind(input.evaluation_id)
.bind(&input.job_name)
.bind(&input.drv_path)
.bind(&input.system)
.bind(&input.outputs)
.bind(is_aggregate)
.bind(&input.constituents)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Build for job '{}' already exists in this evaluation",
input.job_name
))
},
_ => CiError::Database(e),
}
})
}
pub async fn get_completed_by_drv_path(pool: &PgPool, drv_path: &str) -> Result<Option<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds WHERE drv_path = $1 AND status = 'completed' LIMIT 1",
)
.bind(drv_path)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
pub async fn get_completed_by_drv_path(
pool: &PgPool,
drv_path: &str,
) -> Result<Option<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds WHERE drv_path = $1 AND status = 'completed' LIMIT 1",
)
.bind(drv_path)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Build> {
sqlx::query_as::<_, Build>("SELECT * FROM builds WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Build {id} not found")))
}
pub async fn list_for_evaluation(pool: &PgPool, evaluation_id: Uuid) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds WHERE evaluation_id = $1 ORDER BY created_at DESC",
)
.bind(evaluation_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_pending(pool: &PgPool, limit: i64) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT b.* FROM builds b \
JOIN evaluations e ON b.evaluation_id = e.id \
JOIN jobsets j ON e.jobset_id = j.id \
WHERE b.status = 'pending' \
ORDER BY b.priority DESC, j.scheduling_shares DESC, b.created_at ASC \
LIMIT $1",
)
.bind(limit)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
/// Atomically claim a pending build by setting it to running.
/// Returns `None` if the build was already claimed by another worker.
pub async fn start(pool: &PgPool, id: Uuid) -> Result<Option<Build>> {
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'running', started_at = NOW() WHERE id = $1 AND status = 'pending' RETURNING *",
)
.bind(id)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
}
pub async fn complete(
pool: &PgPool,
id: Uuid,
status: BuildStatus,
log_path: Option<&str>,
build_output_path: Option<&str>,
error_message: Option<&str>,
) -> Result<Build> {
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = $1, completed_at = NOW(), log_path = $2, build_output_path = $3, error_message = $4 WHERE id = $5 RETURNING *",
)
.bind(status)
.bind(log_path)
.bind(build_output_path)
.bind(error_message)
sqlx::query_as::<_, Build>("SELECT * FROM builds WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Build {id} not found")))
}
pub async fn list_recent(pool: &PgPool, limit: i64) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>("SELECT * FROM builds ORDER BY created_at DESC LIMIT $1")
.bind(limit)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_evaluation(
pool: &PgPool,
evaluation_id: Uuid,
) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds WHERE evaluation_id = $1 ORDER BY created_at DESC",
)
.bind(evaluation_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_for_project(pool: &PgPool, project_id: Uuid) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT b.* FROM builds b \
JOIN evaluations e ON b.evaluation_id = e.id \
JOIN jobsets j ON e.jobset_id = j.id \
WHERE j.project_id = $1 \
ORDER BY b.created_at DESC",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_pending(pool: &PgPool, limit: i64) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT b.* FROM builds b JOIN evaluations e ON b.evaluation_id = e.id \
JOIN jobsets j ON e.jobset_id = j.id WHERE b.status = 'pending' ORDER BY \
b.priority DESC, j.scheduling_shares DESC, b.created_at ASC LIMIT $1",
)
.bind(limit)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
/// Atomically claim a pending build by setting it to running.
/// Returns `None` if the build was already claimed by another worker.
pub async fn start(pool: &PgPool, id: Uuid) -> Result<Option<Build>> {
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'running', started_at = NOW() WHERE id = $1 \
AND status = 'pending' RETURNING *",
)
.bind(id)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
}
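// Sketch of a worker claim loop built on `list_pending` + `start`
// (illustrative; the batch size of 10 is arbitrary). Because `start` only
// claims rows still in 'pending', exactly one of several racing workers
// receives `Some(build)` for a given row.
pub async fn claim_next(pool: &PgPool) -> Result<Option<Build>> {
  for candidate in list_pending(pool, 10).await? {
    if let Some(claimed) = start(pool, candidate.id).await? {
      return Ok(Some(claimed));
    }
  }
  Ok(None)
}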
pub async fn complete(
pool: &PgPool,
id: Uuid,
status: BuildStatus,
log_path: Option<&str>,
build_output_path: Option<&str>,
error_message: Option<&str>,
) -> Result<Build> {
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = $1, completed_at = NOW(), log_path = $2, \
build_output_path = $3, error_message = $4 WHERE id = $5 RETURNING *",
)
.bind(status)
.bind(log_path)
.bind(build_output_path)
.bind(error_message)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Build {id} not found")))
}
pub async fn list_recent(pool: &PgPool, limit: i64) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds ORDER BY created_at DESC LIMIT $1",
)
.bind(limit)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_for_project(
pool: &PgPool,
project_id: Uuid,
) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT b.* FROM builds b JOIN evaluations e ON b.evaluation_id = e.id \
JOIN jobsets j ON e.jobset_id = j.id WHERE j.project_id = $1 ORDER BY \
b.created_at DESC",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn get_stats(pool: &PgPool) -> Result<BuildStats> {
sqlx::query_as::<_, BuildStats>("SELECT * FROM build_stats")
.fetch_optional(pool)
.await
.map_err(CiError::Database)
.map(|opt| opt.unwrap_or_default())
}
/// Reset builds that were left in 'running' state (orphaned by a crashed runner).
/// Limited to 50 builds per call to prevent a thundering herd.
pub async fn reset_orphaned(pool: &PgPool, older_than_secs: i64) -> Result<u64> {
let result = sqlx::query(
"UPDATE builds SET status = 'pending', started_at = NULL \
WHERE id IN (SELECT id FROM builds WHERE status = 'running' \
AND started_at < NOW() - make_interval(secs => $1) LIMIT 50)",
)
.bind(older_than_secs)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(result.rows_affected())
}
/// List builds with optional evaluation_id, status, system, and job_name filters, with pagination.
pub async fn list_filtered(
pool: &PgPool,
evaluation_id: Option<Uuid>,
status: Option<&str>,
system: Option<&str>,
job_name: Option<&str>,
limit: i64,
offset: i64,
) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds \
WHERE ($1::uuid IS NULL OR evaluation_id = $1) \
AND ($2::text IS NULL OR status = $2) \
AND ($3::text IS NULL OR system = $3) \
AND ($4::text IS NULL OR job_name ILIKE '%' || $4 || '%') \
ORDER BY created_at DESC LIMIT $5 OFFSET $6",
)
.bind(evaluation_id)
.bind(status)
.bind(system)
.bind(job_name)
.bind(limit)
.bind(offset)
.fetch_all(pool)
sqlx::query_as::<_, BuildStats>("SELECT * FROM build_stats")
.fetch_optional(pool)
.await
.map_err(CiError::Database)
.map(|opt| opt.unwrap_or_default())
}
/// Reset builds that were left in 'running' state (orphaned by a crashed
/// runner). Limited to 50 builds per call to prevent a thundering herd.
pub async fn reset_orphaned(
pool: &PgPool,
older_than_secs: i64,
) -> Result<u64> {
let result = sqlx::query(
"UPDATE builds SET status = 'pending', started_at = NULL WHERE id IN \
(SELECT id FROM builds WHERE status = 'running' AND started_at < NOW() - \
make_interval(secs => $1) LIMIT 50)",
)
.bind(older_than_secs)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(result.rows_affected())
}
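// Illustrative janitor loop (the 60s cadence and 30-minute threshold are
// made-up values, not from this repo): requeue builds orphaned by crashed
// runners.
pub async fn orphan_janitor(pool: PgPool) {
  let mut tick = tokio::time::interval(std::time::Duration::from_secs(60));
  loop {
    tick.tick().await;
    match reset_orphaned(&pool, 30 * 60).await {
      Ok(n) if n > 0 => tracing::warn!(requeued = n, "reset orphaned builds"),
      Ok(_) => {},
      Err(e) => tracing::error!(error = %e, "orphan reset failed"),
    }
  }
}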
/// List builds with optional evaluation_id, status, system, and job_name
/// filters, with pagination.
pub async fn list_filtered(
pool: &PgPool,
evaluation_id: Option<Uuid>,
status: Option<&str>,
system: Option<&str>,
job_name: Option<&str>,
limit: i64,
offset: i64,
) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>(
"SELECT * FROM builds WHERE ($1::uuid IS NULL OR evaluation_id = $1) AND \
($2::text IS NULL OR status = $2) AND ($3::text IS NULL OR system = $3) \
AND ($4::text IS NULL OR job_name ILIKE '%' || $4 || '%') ORDER BY \
created_at DESC LIMIT $5 OFFSET $6",
)
.bind(evaluation_id)
.bind(status)
.bind(system)
.bind(job_name)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn count_filtered(
pool: &PgPool,
evaluation_id: Option<Uuid>,
status: Option<&str>,
system: Option<&str>,
job_name: Option<&str>,
pool: &PgPool,
evaluation_id: Option<Uuid>,
status: Option<&str>,
system: Option<&str>,
job_name: Option<&str>,
) -> Result<i64> {
let row: (i64,) = sqlx::query_as(
"SELECT COUNT(*) FROM builds \
WHERE ($1::uuid IS NULL OR evaluation_id = $1) \
AND ($2::text IS NULL OR status = $2) \
AND ($3::text IS NULL OR system = $3) \
AND ($4::text IS NULL OR job_name ILIKE '%' || $4 || '%')",
)
.bind(evaluation_id)
.bind(status)
.bind(system)
.bind(job_name)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
let row: (i64,) = sqlx::query_as(
"SELECT COUNT(*) FROM builds WHERE ($1::uuid IS NULL OR evaluation_id = \
$1) AND ($2::text IS NULL OR status = $2) AND ($3::text IS NULL OR \
system = $3) AND ($4::text IS NULL OR job_name ILIKE '%' || $4 || '%')",
)
.bind(evaluation_id)
.bind(status)
.bind(system)
.bind(job_name)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn cancel(pool: &PgPool, id: Uuid) -> Result<Build> {
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'cancelled', completed_at = NOW() WHERE id = $1 AND status IN ('pending', 'running') RETURNING *",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| {
CiError::NotFound(format!(
"Build {id} not found or not in a cancellable state"
))
})
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'cancelled', completed_at = NOW() WHERE id = \
$1 AND status IN ('pending', 'running') RETURNING *",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| {
CiError::NotFound(format!(
"Build {id} not found or not in a cancellable state"
))
})
}
/// Cancel a build and all its transitive dependents.
pub async fn cancel_cascade(pool: &PgPool, id: Uuid) -> Result<Vec<Build>> {
let mut cancelled = Vec::new();
let mut cancelled = Vec::new();
// Cancel the target build
if let Ok(build) = cancel(pool, id).await {
// Cancel the target build
if let Ok(build) = cancel(pool, id).await {
cancelled.push(build);
}
// Find and cancel all dependents recursively
let mut to_cancel: Vec<Uuid> = vec![id];
while let Some(build_id) = to_cancel.pop() {
let dependents: Vec<(Uuid,)> = sqlx::query_as(
"SELECT build_id FROM build_dependencies WHERE dependency_build_id = $1",
)
.bind(build_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)?;
for (dep_id,) in dependents {
if let Ok(build) = cancel(pool, dep_id).await {
to_cancel.push(dep_id);
cancelled.push(build);
}
}
}
// Find and cancel all dependents recursively
let mut to_cancel: Vec<Uuid> = vec![id];
while let Some(build_id) = to_cancel.pop() {
let dependents: Vec<(Uuid,)> = sqlx::query_as(
"SELECT build_id FROM build_dependencies WHERE dependency_build_id = $1",
)
.bind(build_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)?;
for (dep_id,) in dependents {
if let Ok(build) = cancel(pool, dep_id).await {
to_cancel.push(dep_id);
cancelled.push(build);
}
}
}
Ok(cancelled)
Ok(cancelled)
}
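// Illustrative call site. Note the traversal above terminates even if the
// dependency graph has a cycle: `cancel` only succeeds while a build is
// pending or running, and a dependent is re-queued only when its cancel
// succeeded, so no build is visited twice.
pub async fn cancel_tree(pool: &PgPool, id: Uuid) -> Result<usize> {
  Ok(cancel_cascade(pool, id).await?.len())
}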
/// Restart a build by resetting it to pending state.
/// Only works for failed, completed, or cancelled builds.
pub async fn restart(pool: &PgPool, id: Uuid) -> Result<Build> {
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'pending', started_at = NULL, completed_at = NULL, \
log_path = NULL, build_output_path = NULL, error_message = NULL, \
retry_count = retry_count + 1 \
WHERE id = $1 AND status IN ('failed', 'completed', 'cancelled') RETURNING *",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| {
CiError::NotFound(format!(
"Build {id} not found or not in a restartable state"
))
})
sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'pending', started_at = NULL, completed_at = \
NULL, log_path = NULL, build_output_path = NULL, error_message = NULL, \
retry_count = retry_count + 1 WHERE id = $1 AND status IN ('failed', \
'completed', 'cancelled') RETURNING *",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| {
CiError::NotFound(format!(
"Build {id} not found or not in a restartable state"
))
})
}
/// Mark a build's outputs as signed.
pub async fn mark_signed(pool: &PgPool, id: Uuid) -> Result<()> {
sqlx::query("UPDATE builds SET signed = true WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
sqlx::query("UPDATE builds SET signed = true WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
}
/// Batch-fetch completed builds by derivation paths.
/// Returns a map from drv_path to Build for deduplication.
pub async fn get_completed_by_drv_paths(
pool: &PgPool,
drv_paths: &[String],
pool: &PgPool,
drv_paths: &[String],
) -> Result<std::collections::HashMap<String, Build>> {
if drv_paths.is_empty() {
return Ok(std::collections::HashMap::new());
}
let builds = sqlx::query_as::<_, Build>(
"SELECT DISTINCT ON (drv_path) * FROM builds \
WHERE drv_path = ANY($1) AND status = 'completed' \
ORDER BY drv_path, completed_at DESC",
)
.bind(drv_paths)
.fetch_all(pool)
.await
.map_err(CiError::Database)?;
if drv_paths.is_empty() {
return Ok(std::collections::HashMap::new());
}
let builds = sqlx::query_as::<_, Build>(
"SELECT DISTINCT ON (drv_path) * FROM builds WHERE drv_path = ANY($1) AND \
status = 'completed' ORDER BY drv_path, completed_at DESC",
)
.bind(drv_paths)
.fetch_all(pool)
.await
.map_err(CiError::Database)?;
Ok(builds
.into_iter()
.map(|b| (b.drv_path.clone(), b))
.collect())
Ok(
builds
.into_iter()
.map(|b| (b.drv_path.clone(), b))
.collect(),
)
}
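// Sketch of the dedup split this batch lookup enables: partition requested
// derivations into cache hits and paths that still need a build.
pub async fn split_cached(
  pool: &PgPool,
  drv_paths: Vec<String>,
) -> Result<(std::collections::HashMap<String, Build>, Vec<String>)> {
  let hits = get_completed_by_drv_paths(pool, &drv_paths).await?;
  let misses = drv_paths
    .into_iter()
    .filter(|p| !hits.contains_key(p))
    .collect();
  Ok((hits, misses))
}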
/// Set the builder_id for a build.
pub async fn set_builder(pool: &PgPool, id: Uuid, builder_id: Uuid) -> Result<()> {
sqlx::query("UPDATE builds SET builder_id = $1 WHERE id = $2")
.bind(builder_id)
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
pub async fn set_builder(
pool: &PgPool,
id: Uuid,
builder_id: Uuid,
) -> Result<()> {
sqlx::query("UPDATE builds SET builder_id = $1 WHERE id = $2")
.bind(builder_id)
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
}


@@ -1,111 +1,129 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{Channel, CreateChannel};
use crate::{
error::{CiError, Result},
models::{Channel, CreateChannel},
};
pub async fn create(pool: &PgPool, input: CreateChannel) -> Result<Channel> {
sqlx::query_as::<_, Channel>(
"INSERT INTO channels (project_id, name, jobset_id) \
VALUES ($1, $2, $3) RETURNING *",
)
.bind(input.project_id)
.bind(&input.name)
.bind(input.jobset_id)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => CiError::Conflict(
format!("Channel '{}' already exists for this project", input.name),
),
_ => CiError::Database(e),
})
sqlx::query_as::<_, Channel>(
"INSERT INTO channels (project_id, name, jobset_id) VALUES ($1, $2, $3) \
RETURNING *",
)
.bind(input.project_id)
.bind(&input.name)
.bind(input.jobset_id)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Channel '{}' already exists for this project",
input.name
))
},
_ => CiError::Database(e),
}
})
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Channel> {
sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Channel {id} not found")))
sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Channel {id} not found")))
}
pub async fn list_for_project(pool: &PgPool, project_id: Uuid) -> Result<Vec<Channel>> {
sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE project_id = $1 ORDER BY name")
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_project(
pool: &PgPool,
project_id: Uuid,
) -> Result<Vec<Channel>> {
sqlx::query_as::<_, Channel>(
"SELECT * FROM channels WHERE project_id = $1 ORDER BY name",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_all(pool: &PgPool) -> Result<Vec<Channel>> {
sqlx::query_as::<_, Channel>("SELECT * FROM channels ORDER BY name")
.fetch_all(pool)
.await
.map_err(CiError::Database)
sqlx::query_as::<_, Channel>("SELECT * FROM channels ORDER BY name")
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
/// Promote an evaluation to a channel (set it as the current evaluation).
pub async fn promote(pool: &PgPool, channel_id: Uuid, evaluation_id: Uuid) -> Result<Channel> {
sqlx::query_as::<_, Channel>(
"UPDATE channels SET current_evaluation_id = $1, updated_at = NOW() \
WHERE id = $2 RETURNING *",
)
.bind(evaluation_id)
.bind(channel_id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Channel {channel_id} not found")))
pub async fn promote(
pool: &PgPool,
channel_id: Uuid,
evaluation_id: Uuid,
) -> Result<Channel> {
sqlx::query_as::<_, Channel>(
"UPDATE channels SET current_evaluation_id = $1, updated_at = NOW() WHERE \
id = $2 RETURNING *",
)
.bind(evaluation_id)
.bind(channel_id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Channel {channel_id} not found")))
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM channels WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Channel {id} not found")));
}
Ok(())
}
/// Find the channels tracking a jobset and auto-promote if all builds in the evaluation succeeded.
pub async fn auto_promote_if_complete(
pool: &PgPool,
jobset_id: Uuid,
evaluation_id: Uuid,
) -> Result<()> {
// Check if all builds for this evaluation are completed
let row: (i64, i64) = sqlx::query_as(
"SELECT COUNT(*), COUNT(*) FILTER (WHERE status = 'completed') \
FROM builds WHERE evaluation_id = $1",
)
.bind(evaluation_id)
.fetch_one(pool)
let result = sqlx::query("DELETE FROM channels WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
let (total, completed) = row;
if total == 0 || total != completed {
return Ok(());
}
// All builds completed — promote to any channels tracking this jobset
let channels = sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE jobset_id = $1")
.bind(jobset_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)?;
for channel in channels {
let _ = promote(pool, channel.id, evaluation_id).await;
tracing::info!(
channel = %channel.name,
evaluation_id = %evaluation_id,
"Auto-promoted evaluation to channel"
);
}
Ok(())
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Channel {id} not found")));
}
Ok(())
}
/// Find the channels tracking a jobset and auto-promote if all builds in
/// the evaluation succeeded.
pub async fn auto_promote_if_complete(
pool: &PgPool,
jobset_id: Uuid,
evaluation_id: Uuid,
) -> Result<()> {
// Check if all builds for this evaluation are completed
let row: (i64, i64) = sqlx::query_as(
"SELECT COUNT(*), COUNT(*) FILTER (WHERE status = 'completed') FROM \
builds WHERE evaluation_id = $1",
)
.bind(evaluation_id)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
let (total, completed) = row;
if total == 0 || total != completed {
return Ok(());
}
// All builds completed — promote to any channels tracking this jobset
let channels =
sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE jobset_id = $1")
.bind(jobset_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)?;
for channel in channels {
let _ = promote(pool, channel.id, evaluation_id).await;
tracing::info!(
channel = %channel.name,
evaluation_id = %evaluation_id,
"Auto-promoted evaluation to channel"
);
}
Ok(())
}
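// Hypothetical hook wiring (not from this commit): call this whenever a
// build reaches a terminal state. `auto_promote_if_complete` is a no-op
// unless every build in the evaluation completed, and it deliberately
// swallows per-channel promote failures (the `let _ =` above).
pub async fn on_build_finished(
  pool: &PgPool,
  jobset_id: Uuid,
  evaluation_id: Uuid,
) -> Result<()> {
  auto_promote_if_complete(pool, jobset_id, evaluation_id).await
}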


@@ -1,146 +1,167 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{CreateEvaluation, Evaluation, EvaluationStatus};
use crate::{
error::{CiError, Result},
models::{CreateEvaluation, Evaluation, EvaluationStatus},
};
pub async fn create(pool: &PgPool, input: CreateEvaluation) -> Result<Evaluation> {
sqlx::query_as::<_, Evaluation>(
"INSERT INTO evaluations (jobset_id, commit_hash, status) VALUES ($1, $2, 'pending') RETURNING *",
)
.bind(input.jobset_id)
.bind(&input.commit_hash)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Evaluation for commit '{}' already exists in this jobset",
input.commit_hash
))
}
_ => CiError::Database(e),
})
pub async fn create(
pool: &PgPool,
input: CreateEvaluation,
) -> Result<Evaluation> {
sqlx::query_as::<_, Evaluation>(
"INSERT INTO evaluations (jobset_id, commit_hash, status) VALUES ($1, $2, \
'pending') RETURNING *",
)
.bind(input.jobset_id)
.bind(&input.commit_hash)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Evaluation for commit '{}' already exists in this jobset",
input.commit_hash
))
},
_ => CiError::Database(e),
}
})
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Evaluation> {
sqlx::query_as::<_, Evaluation>("SELECT * FROM evaluations WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found")))
}
pub async fn list_for_jobset(pool: &PgPool, jobset_id: Uuid) -> Result<Vec<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE jobset_id = $1 ORDER BY evaluation_time DESC",
)
.bind(jobset_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
/// List evaluations with optional jobset_id and status filters, with pagination.
pub async fn list_filtered(
pool: &PgPool,
jobset_id: Option<Uuid>,
status: Option<&str>,
limit: i64,
offset: i64,
) -> Result<Vec<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations \
WHERE ($1::uuid IS NULL OR jobset_id = $1) \
AND ($2::text IS NULL OR status = $2) \
ORDER BY evaluation_time DESC LIMIT $3 OFFSET $4",
)
.bind(jobset_id)
.bind(status)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn count_filtered(
pool: &PgPool,
jobset_id: Option<Uuid>,
status: Option<&str>,
) -> Result<i64> {
let row: (i64,) = sqlx::query_as(
"SELECT COUNT(*) FROM evaluations \
WHERE ($1::uuid IS NULL OR jobset_id = $1) \
AND ($2::text IS NULL OR status = $2)",
)
.bind(jobset_id)
.bind(status)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn update_status(
pool: &PgPool,
id: Uuid,
status: EvaluationStatus,
error_message: Option<&str>,
) -> Result<Evaluation> {
sqlx::query_as::<_, Evaluation>(
"UPDATE evaluations SET status = $1, error_message = $2 WHERE id = $3 RETURNING *",
)
.bind(status)
.bind(error_message)
sqlx::query_as::<_, Evaluation>("SELECT * FROM evaluations WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found")))
}
pub async fn get_latest(pool: &PgPool, jobset_id: Uuid) -> Result<Option<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE jobset_id = $1 ORDER BY evaluation_time DESC LIMIT 1",
)
.bind(jobset_id)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_jobset(
pool: &PgPool,
jobset_id: Uuid,
) -> Result<Vec<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE jobset_id = $1 ORDER BY evaluation_time \
DESC",
)
.bind(jobset_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
/// List evaluations with optional jobset_id and status filters, with
/// pagination.
pub async fn list_filtered(
pool: &PgPool,
jobset_id: Option<Uuid>,
status: Option<&str>,
limit: i64,
offset: i64,
) -> Result<Vec<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE ($1::uuid IS NULL OR jobset_id = $1) AND \
($2::text IS NULL OR status = $2) ORDER BY evaluation_time DESC LIMIT $3 \
OFFSET $4",
)
.bind(jobset_id)
.bind(status)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn count_filtered(
pool: &PgPool,
jobset_id: Option<Uuid>,
status: Option<&str>,
) -> Result<i64> {
let row: (i64,) = sqlx::query_as(
"SELECT COUNT(*) FROM evaluations WHERE ($1::uuid IS NULL OR jobset_id = \
$1) AND ($2::text IS NULL OR status = $2)",
)
.bind(jobset_id)
.bind(status)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn update_status(
pool: &PgPool,
id: Uuid,
status: EvaluationStatus,
error_message: Option<&str>,
) -> Result<Evaluation> {
sqlx::query_as::<_, Evaluation>(
"UPDATE evaluations SET status = $1, error_message = $2 WHERE id = $3 \
RETURNING *",
)
.bind(status)
.bind(error_message)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found")))
}
pub async fn get_latest(
pool: &PgPool,
jobset_id: Uuid,
) -> Result<Option<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE jobset_id = $1 ORDER BY evaluation_time \
DESC LIMIT 1",
)
.bind(jobset_id)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
}
/// Set the inputs hash for an evaluation (used for eval caching).
pub async fn set_inputs_hash(pool: &PgPool, id: Uuid, hash: &str) -> Result<()> {
sqlx::query("UPDATE evaluations SET inputs_hash = $1 WHERE id = $2")
.bind(hash)
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
pub async fn set_inputs_hash(
pool: &PgPool,
id: Uuid,
hash: &str,
) -> Result<()> {
sqlx::query("UPDATE evaluations SET inputs_hash = $1 WHERE id = $2")
.bind(hash)
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
Ok(())
}
/// Check if an evaluation with the same inputs_hash already exists for this jobset.
/// Check if an evaluation with the same inputs_hash already exists for this
/// jobset.
pub async fn get_by_inputs_hash(
pool: &PgPool,
jobset_id: Uuid,
inputs_hash: &str,
pool: &PgPool,
jobset_id: Uuid,
inputs_hash: &str,
) -> Result<Option<Evaluation>> {
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE jobset_id = $1 AND inputs_hash = $2 \
AND status = 'completed' ORDER BY evaluation_time DESC LIMIT 1",
)
.bind(jobset_id)
.bind(inputs_hash)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
sqlx::query_as::<_, Evaluation>(
"SELECT * FROM evaluations WHERE jobset_id = $1 AND inputs_hash = $2 AND \
status = 'completed' ORDER BY evaluation_time DESC LIMIT 1",
)
.bind(jobset_id)
.bind(inputs_hash)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
}
pub async fn count(pool: &PgPool) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM evaluations")
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM evaluations")
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
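// Sketch of the eval-cache flow built from `get_by_inputs_hash` and
// `set_inputs_hash`. How `inputs_hash` is computed is out of scope here.
pub async fn reuse_or_record(
  pool: &PgPool,
  jobset_id: Uuid,
  eval_id: Uuid,
  inputs_hash: &str,
) -> Result<Option<Evaluation>> {
  if let Some(prev) = get_by_inputs_hash(pool, jobset_id, inputs_hash).await? {
    // Inputs unchanged since a completed evaluation; callers can skip work.
    return Ok(Some(prev));
  }
  set_inputs_hash(pool, eval_id, inputs_hash).await?;
  Ok(None)
}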


@@ -1,52 +1,62 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::JobsetInput;
use crate::{
error::{CiError, Result},
models::JobsetInput,
};
pub async fn create(
pool: &PgPool,
jobset_id: Uuid,
name: &str,
input_type: &str,
value: &str,
revision: Option<&str>,
pool: &PgPool,
jobset_id: Uuid,
name: &str,
input_type: &str,
value: &str,
revision: Option<&str>,
) -> Result<JobsetInput> {
sqlx::query_as::<_, JobsetInput>(
"INSERT INTO jobset_inputs (jobset_id, name, input_type, value, revision) VALUES ($1, $2, $3, $4, $5) RETURNING *",
)
.bind(jobset_id)
.bind(name)
.bind(input_type)
.bind(value)
.bind(revision)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Input '{name}' already exists in this jobset"))
}
_ => CiError::Database(e),
})
sqlx::query_as::<_, JobsetInput>(
"INSERT INTO jobset_inputs (jobset_id, name, input_type, value, revision) \
VALUES ($1, $2, $3, $4, $5) RETURNING *",
)
.bind(jobset_id)
.bind(name)
.bind(input_type)
.bind(value)
.bind(revision)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Input '{name}' already exists in this jobset"
))
},
_ => CiError::Database(e),
}
})
}
pub async fn list_for_jobset(pool: &PgPool, jobset_id: Uuid) -> Result<Vec<JobsetInput>> {
sqlx::query_as::<_, JobsetInput>(
"SELECT * FROM jobset_inputs WHERE jobset_id = $1 ORDER BY name ASC",
)
.bind(jobset_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_jobset(
pool: &PgPool,
jobset_id: Uuid,
) -> Result<Vec<JobsetInput>> {
sqlx::query_as::<_, JobsetInput>(
"SELECT * FROM jobset_inputs WHERE jobset_id = $1 ORDER BY name ASC",
)
.bind(jobset_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM jobset_inputs WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Jobset input {id} not found")));
}
Ok(())
let result = sqlx::query("DELETE FROM jobset_inputs WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Jobset input {id} not found")));
}
Ok(())
}
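// Illustrative only: pin a git input on a jobset. The type, value, and
// revision strings are invented examples; the schema itself does not
// dictate them.
pub async fn pin_nixpkgs(pool: &PgPool, jobset_id: Uuid) -> Result<JobsetInput> {
  create(
    pool,
    jobset_id,
    "nixpkgs",
    "git",
    "https://github.com/NixOS/nixpkgs",
    Some("nixos-unstable"),
  )
  .await
}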


@@ -1,151 +1,169 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{ActiveJobset, CreateJobset, Jobset, UpdateJobset};
use crate::{
error::{CiError, Result},
models::{ActiveJobset, CreateJobset, Jobset, UpdateJobset},
};
pub async fn create(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
let enabled = input.enabled.unwrap_or(true);
let flake_mode = input.flake_mode.unwrap_or(true);
let check_interval = input.check_interval.unwrap_or(60);
let scheduling_shares = input.scheduling_shares.unwrap_or(100);
let enabled = input.enabled.unwrap_or(true);
let flake_mode = input.flake_mode.unwrap_or(true);
let check_interval = input.check_interval.unwrap_or(60);
let scheduling_shares = input.scheduling_shares.unwrap_or(100);
sqlx::query_as::<_, Jobset>(
"INSERT INTO jobsets (project_id, name, nix_expression, enabled, flake_mode, check_interval, branch, scheduling_shares) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING *",
)
.bind(input.project_id)
.bind(&input.name)
.bind(&input.nix_expression)
.bind(enabled)
.bind(flake_mode)
.bind(check_interval)
.bind(&input.branch)
.bind(scheduling_shares)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Jobset '{}' already exists in this project", input.name))
}
_ => CiError::Database(e),
})
sqlx::query_as::<_, Jobset>(
"INSERT INTO jobsets (project_id, name, nix_expression, enabled, \
flake_mode, check_interval, branch, scheduling_shares) VALUES ($1, $2, \
$3, $4, $5, $6, $7, $8) RETURNING *",
)
.bind(input.project_id)
.bind(&input.name)
.bind(&input.nix_expression)
.bind(enabled)
.bind(flake_mode)
.bind(check_interval)
.bind(&input.branch)
.bind(scheduling_shares)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Jobset '{}' already exists in this project",
input.name
))
},
_ => CiError::Database(e),
}
})
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Jobset> {
sqlx::query_as::<_, Jobset>("SELECT * FROM jobsets WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Jobset {id} not found")))
sqlx::query_as::<_, Jobset>("SELECT * FROM jobsets WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Jobset {id} not found")))
}
pub async fn list_for_project(
pool: &PgPool,
project_id: Uuid,
limit: i64,
offset: i64,
pool: &PgPool,
project_id: Uuid,
limit: i64,
offset: i64,
) -> Result<Vec<Jobset>> {
sqlx::query_as::<_, Jobset>(
"SELECT * FROM jobsets WHERE project_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3",
)
.bind(project_id)
.bind(limit)
.bind(offset)
sqlx::query_as::<_, Jobset>(
"SELECT * FROM jobsets WHERE project_id = $1 ORDER BY created_at DESC \
LIMIT $2 OFFSET $3",
)
.bind(project_id)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn count_for_project(pool: &PgPool, project_id: Uuid) -> Result<i64> {
let row: (i64,) =
sqlx::query_as("SELECT COUNT(*) FROM jobsets WHERE project_id = $1")
.bind(project_id)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn update(
pool: &PgPool,
id: Uuid,
input: UpdateJobset,
) -> Result<Jobset> {
let existing = get(pool, id).await?;
let name = input.name.unwrap_or(existing.name);
let nix_expression = input.nix_expression.unwrap_or(existing.nix_expression);
let enabled = input.enabled.unwrap_or(existing.enabled);
let flake_mode = input.flake_mode.unwrap_or(existing.flake_mode);
let check_interval = input.check_interval.unwrap_or(existing.check_interval);
let branch = input.branch.or(existing.branch);
let scheduling_shares = input
.scheduling_shares
.unwrap_or(existing.scheduling_shares);
sqlx::query_as::<_, Jobset>(
"UPDATE jobsets SET name = $1, nix_expression = $2, enabled = $3, \
flake_mode = $4, check_interval = $5, branch = $6, scheduling_shares = \
$7 WHERE id = $8 RETURNING *",
)
.bind(&name)
.bind(&nix_expression)
.bind(enabled)
.bind(flake_mode)
.bind(check_interval)
.bind(&branch)
.bind(scheduling_shares)
.bind(id)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Jobset '{name}' already exists in this project"
))
},
_ => CiError::Database(e),
}
})
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM jobsets WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Jobset {id} not found")));
}
Ok(())
}
pub async fn upsert(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
let enabled = input.enabled.unwrap_or(true);
let flake_mode = input.flake_mode.unwrap_or(true);
let check_interval = input.check_interval.unwrap_or(60);
let scheduling_shares = input.scheduling_shares.unwrap_or(100);
sqlx::query_as::<_, Jobset>(
"INSERT INTO jobsets (project_id, name, nix_expression, enabled, \
flake_mode, check_interval, branch, scheduling_shares) VALUES ($1, $2, \
$3, $4, $5, $6, $7, $8) ON CONFLICT (project_id, name) DO UPDATE SET \
nix_expression = EXCLUDED.nix_expression, enabled = EXCLUDED.enabled, \
flake_mode = EXCLUDED.flake_mode, check_interval = \
EXCLUDED.check_interval, branch = EXCLUDED.branch, scheduling_shares = \
EXCLUDED.scheduling_shares RETURNING *",
)
.bind(input.project_id)
.bind(&input.name)
.bind(&input.nix_expression)
.bind(enabled)
.bind(flake_mode)
.bind(check_interval)
.bind(&input.branch)
.bind(scheduling_shares)
.fetch_one(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_active(pool: &PgPool) -> Result<Vec<ActiveJobset>> {
sqlx::query_as::<_, ActiveJobset>("SELECT * FROM active_jobsets")
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn count_for_project(pool: &PgPool, project_id: Uuid) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM jobsets WHERE project_id = $1")
.bind(project_id)
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn update(pool: &PgPool, id: Uuid, input: UpdateJobset) -> Result<Jobset> {
let existing = get(pool, id).await?;
let name = input.name.unwrap_or(existing.name);
let nix_expression = input.nix_expression.unwrap_or(existing.nix_expression);
let enabled = input.enabled.unwrap_or(existing.enabled);
let flake_mode = input.flake_mode.unwrap_or(existing.flake_mode);
let check_interval = input.check_interval.unwrap_or(existing.check_interval);
let branch = input.branch.or(existing.branch);
let scheduling_shares = input
.scheduling_shares
.unwrap_or(existing.scheduling_shares);
sqlx::query_as::<_, Jobset>(
"UPDATE jobsets SET name = $1, nix_expression = $2, enabled = $3, flake_mode = $4, check_interval = $5, branch = $6, scheduling_shares = $7 WHERE id = $8 RETURNING *",
)
.bind(&name)
.bind(&nix_expression)
.bind(enabled)
.bind(flake_mode)
.bind(check_interval)
.bind(&branch)
.bind(scheduling_shares)
.bind(id)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Jobset '{name}' already exists in this project"))
}
_ => CiError::Database(e),
})
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM jobsets WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Jobset {id} not found")));
}
Ok(())
}
pub async fn upsert(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
let enabled = input.enabled.unwrap_or(true);
let flake_mode = input.flake_mode.unwrap_or(true);
let check_interval = input.check_interval.unwrap_or(60);
let scheduling_shares = input.scheduling_shares.unwrap_or(100);
sqlx::query_as::<_, Jobset>(
"INSERT INTO jobsets (project_id, name, nix_expression, enabled, flake_mode, check_interval, branch, scheduling_shares) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8) \
ON CONFLICT (project_id, name) DO UPDATE SET \
nix_expression = EXCLUDED.nix_expression, \
enabled = EXCLUDED.enabled, \
flake_mode = EXCLUDED.flake_mode, \
check_interval = EXCLUDED.check_interval, \
branch = EXCLUDED.branch, \
scheduling_shares = EXCLUDED.scheduling_shares \
RETURNING *",
)
.bind(input.project_id)
.bind(&input.name)
.bind(&input.nix_expression)
.bind(enabled)
.bind(flake_mode)
.bind(check_interval)
.bind(&input.branch)
.bind(scheduling_shares)
.fetch_one(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_active(pool: &PgPool) -> Result<Vec<ActiveJobset>> {
sqlx::query_as::<_, ActiveJobset>("SELECT * FROM active_jobsets")
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
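// Sketch: `upsert` is what makes declarative jobset sync idempotent.
// Re-applying an unchanged `CreateJobset` converges on the
// (project_id, name) key instead of tripping the unique violation that
// `create` maps to a Conflict.
pub async fn sync_jobset(pool: &PgPool, spec: CreateJobset) -> Result<Jobset> {
  let jobset = upsert(pool, spec).await?;
  tracing::debug!(jobset = %jobset.name, "jobset synced");
  Ok(jobset)
}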


@@ -1,48 +1,60 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{CreateNotificationConfig, NotificationConfig};
use crate::{
error::{CiError, Result},
models::{CreateNotificationConfig, NotificationConfig},
};
pub async fn create(pool: &PgPool, input: CreateNotificationConfig) -> Result<NotificationConfig> {
sqlx::query_as::<_, NotificationConfig>(
"INSERT INTO notification_configs (project_id, notification_type, config) VALUES ($1, $2, $3) RETURNING *",
)
.bind(input.project_id)
.bind(&input.notification_type)
.bind(&input.config)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Notification config '{}' already exists for this project",
input.notification_type
))
}
_ => CiError::Database(e),
})
pub async fn create(
pool: &PgPool,
input: CreateNotificationConfig,
) -> Result<NotificationConfig> {
sqlx::query_as::<_, NotificationConfig>(
"INSERT INTO notification_configs (project_id, notification_type, config) \
VALUES ($1, $2, $3) RETURNING *",
)
.bind(input.project_id)
.bind(&input.notification_type)
.bind(&input.config)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Notification config '{}' already exists for this project",
input.notification_type
))
},
_ => CiError::Database(e),
}
})
}
pub async fn list_for_project(pool: &PgPool, project_id: Uuid) -> Result<Vec<NotificationConfig>> {
sqlx::query_as::<_, NotificationConfig>(
"SELECT * FROM notification_configs WHERE project_id = $1 AND enabled = true ORDER BY created_at DESC",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_project(
pool: &PgPool,
project_id: Uuid,
) -> Result<Vec<NotificationConfig>> {
sqlx::query_as::<_, NotificationConfig>(
"SELECT * FROM notification_configs WHERE project_id = $1 AND enabled = \
true ORDER BY created_at DESC",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM notification_configs WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!(
"Notification config {id} not found"
)));
}
Ok(())
let result = sqlx::query("DELETE FROM notification_configs WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!(
"Notification config {id} not found"
)));
}
Ok(())
}


@@ -1,111 +1,125 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{CreateProject, Project, UpdateProject};
use crate::{
error::{CiError, Result},
models::{CreateProject, Project, UpdateProject},
};
pub async fn create(pool: &PgPool, input: CreateProject) -> Result<Project> {
sqlx::query_as::<_, Project>(
"INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, $3) RETURNING *",
)
.bind(&input.name)
.bind(&input.description)
.bind(&input.repository_url)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Project '{}' already exists", input.name))
}
_ => CiError::Database(e),
})
sqlx::query_as::<_, Project>(
"INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, \
$3) RETURNING *",
)
.bind(&input.name)
.bind(&input.description)
.bind(&input.repository_url)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Project '{}' already exists", input.name))
},
_ => CiError::Database(e),
}
})
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Project> {
sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Project {id} not found")))
sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Project {id} not found")))
}
pub async fn get_by_name(pool: &PgPool, name: &str) -> Result<Project> {
sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE name = $1")
.bind(name)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Project '{name}' not found")))
sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE name = $1")
.bind(name)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Project '{name}' not found")))
}
pub async fn list(pool: &PgPool, limit: i64, offset: i64) -> Result<Vec<Project>> {
sqlx::query_as::<_, Project>(
"SELECT * FROM projects ORDER BY created_at DESC LIMIT $1 OFFSET $2",
)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list(
pool: &PgPool,
limit: i64,
offset: i64,
) -> Result<Vec<Project>> {
sqlx::query_as::<_, Project>(
"SELECT * FROM projects ORDER BY created_at DESC LIMIT $1 OFFSET $2",
)
.bind(limit)
.bind(offset)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn count(pool: &PgPool) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM projects")
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn update(pool: &PgPool, id: Uuid, input: UpdateProject) -> Result<Project> {
// Build dynamic update — only set provided fields
let existing = get(pool, id).await?;
let name = input.name.unwrap_or(existing.name);
let description = input.description.or(existing.description);
let repository_url = input.repository_url.unwrap_or(existing.repository_url);
sqlx::query_as::<_, Project>(
"UPDATE projects SET name = $1, description = $2, repository_url = $3 WHERE id = $4 RETURNING *",
)
.bind(&name)
.bind(&description)
.bind(&repository_url)
.bind(id)
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM projects")
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Project '{name}' already exists"))
}
_ => CiError::Database(e),
})
.map_err(CiError::Database)?;
Ok(row.0)
}
pub async fn update(
pool: &PgPool,
id: Uuid,
input: UpdateProject,
) -> Result<Project> {
// Build dynamic update — only set provided fields
let existing = get(pool, id).await?;
let name = input.name.unwrap_or(existing.name);
let description = input.description.or(existing.description);
let repository_url = input.repository_url.unwrap_or(existing.repository_url);
sqlx::query_as::<_, Project>(
"UPDATE projects SET name = $1, description = $2, repository_url = $3 \
WHERE id = $4 RETURNING *",
)
.bind(&name)
.bind(&description)
.bind(&repository_url)
.bind(id)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Project '{name}' already exists"))
},
_ => CiError::Database(e),
}
})
}
pub async fn upsert(pool: &PgPool, input: CreateProject) -> Result<Project> {
sqlx::query_as::<_, Project>(
"INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, $3) \
ON CONFLICT (name) DO UPDATE SET \
description = EXCLUDED.description, \
repository_url = EXCLUDED.repository_url \
RETURNING *",
)
.bind(&input.name)
.bind(&input.description)
.bind(&input.repository_url)
.fetch_one(pool)
.await
.map_err(CiError::Database)
sqlx::query_as::<_, Project>(
"INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, \
$3) ON CONFLICT (name) DO UPDATE SET description = EXCLUDED.description, \
repository_url = EXCLUDED.repository_url RETURNING *",
)
.bind(&input.name)
.bind(&input.description)
.bind(&input.repository_url)
.fetch_one(pool)
.await
.map_err(CiError::Database)
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM projects WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
let result = sqlx::query("DELETE FROM projects WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Project {id} not found")));
}
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Project {id} not found")));
}
Ok(())
Ok(())
}


@@ -1,124 +1,135 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{CreateRemoteBuilder, RemoteBuilder};
use crate::{
error::{CiError, Result},
models::{CreateRemoteBuilder, RemoteBuilder},
};
pub async fn create(pool: &PgPool, input: CreateRemoteBuilder) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>(
"INSERT INTO remote_builders (name, ssh_uri, systems, max_jobs, speed_factor, \
supported_features, mandatory_features, public_host_key, ssh_key_file) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING *",
)
.bind(&input.name)
.bind(&input.ssh_uri)
.bind(&input.systems)
.bind(input.max_jobs.unwrap_or(1))
.bind(input.speed_factor.unwrap_or(1))
.bind(input.supported_features.as_deref().unwrap_or(&[]))
.bind(input.mandatory_features.as_deref().unwrap_or(&[]))
.bind(&input.public_host_key)
.bind(&input.ssh_key_file)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!("Remote builder '{}' already exists", input.name))
}
_ => CiError::Database(e),
})
pub async fn create(
pool: &PgPool,
input: CreateRemoteBuilder,
) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>(
"INSERT INTO remote_builders (name, ssh_uri, systems, max_jobs, \
speed_factor, supported_features, mandatory_features, public_host_key, \
ssh_key_file) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING *",
)
.bind(&input.name)
.bind(&input.ssh_uri)
.bind(&input.systems)
.bind(input.max_jobs.unwrap_or(1))
.bind(input.speed_factor.unwrap_or(1))
.bind(input.supported_features.as_deref().unwrap_or(&[]))
.bind(input.mandatory_features.as_deref().unwrap_or(&[]))
.bind(&input.public_host_key)
.bind(&input.ssh_key_file)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Remote builder '{}' already exists",
input.name
))
},
_ => CiError::Database(e),
}
})
}
pub async fn get(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>("SELECT * FROM remote_builders WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE id = $1",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
}
pub async fn list(pool: &PgPool) -> Result<Vec<RemoteBuilder>> {
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders ORDER BY speed_factor DESC, name",
)
.fetch_all(pool)
.await
.map_err(CiError::Database)
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders ORDER BY speed_factor DESC, name",
)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn list_enabled(pool: &PgPool) -> Result<Vec<RemoteBuilder>> {
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE enabled = true ORDER BY speed_factor DESC, name",
)
.fetch_all(pool)
.await
.map_err(CiError::Database)
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE enabled = true ORDER BY speed_factor \
DESC, name",
)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
/// Find a suitable builder for the given system.
pub async fn find_for_system(pool: &PgPool, system: &str) -> Result<Vec<RemoteBuilder>> {
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE enabled = true AND $1 = ANY(systems) \
ORDER BY speed_factor DESC",
)
.bind(system)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn find_for_system(
pool: &PgPool,
system: &str,
) -> Result<Vec<RemoteBuilder>> {
sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE enabled = true AND $1 = ANY(systems) \
ORDER BY speed_factor DESC",
)
.bind(system)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn update(
pool: &PgPool,
id: Uuid,
input: crate::models::UpdateRemoteBuilder,
pool: &PgPool,
id: Uuid,
input: crate::models::UpdateRemoteBuilder,
) -> Result<RemoteBuilder> {
// Build dynamic update — use COALESCE pattern
sqlx::query_as::<_, RemoteBuilder>(
"UPDATE remote_builders SET \
name = COALESCE($1, name), \
ssh_uri = COALESCE($2, ssh_uri), \
systems = COALESCE($3, systems), \
max_jobs = COALESCE($4, max_jobs), \
speed_factor = COALESCE($5, speed_factor), \
supported_features = COALESCE($6, supported_features), \
mandatory_features = COALESCE($7, mandatory_features), \
enabled = COALESCE($8, enabled), \
public_host_key = COALESCE($9, public_host_key), \
ssh_key_file = COALESCE($10, ssh_key_file) \
WHERE id = $11 RETURNING *",
)
.bind(&input.name)
.bind(&input.ssh_uri)
.bind(&input.systems)
.bind(input.max_jobs)
.bind(input.speed_factor)
.bind(&input.supported_features)
.bind(&input.mandatory_features)
.bind(input.enabled)
.bind(&input.public_host_key)
.bind(&input.ssh_key_file)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
// Build dynamic update — use COALESCE pattern
sqlx::query_as::<_, RemoteBuilder>(
"UPDATE remote_builders SET name = COALESCE($1, name), ssh_uri = \
COALESCE($2, ssh_uri), systems = COALESCE($3, systems), max_jobs = \
COALESCE($4, max_jobs), speed_factor = COALESCE($5, speed_factor), \
supported_features = COALESCE($6, supported_features), \
mandatory_features = COALESCE($7, mandatory_features), enabled = \
COALESCE($8, enabled), public_host_key = COALESCE($9, public_host_key), \
ssh_key_file = COALESCE($10, ssh_key_file) WHERE id = $11 RETURNING *",
)
.bind(&input.name)
.bind(&input.ssh_uri)
.bind(&input.systems)
.bind(input.max_jobs)
.bind(input.speed_factor)
.bind(&input.supported_features)
.bind(&input.mandatory_features)
.bind(input.enabled)
.bind(&input.public_host_key)
.bind(&input.ssh_key_file)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM remote_builders WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Remote builder {id} not found")));
}
Ok(())
let result = sqlx::query("DELETE FROM remote_builders WHERE id = $1")
.bind(id)
.execute(pool)
.await
.map_err(CiError::Database)?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Remote builder {id} not found")));
}
Ok(())
}
pub async fn count(pool: &PgPool) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM remote_builders")
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM remote_builders")
.fetch_one(pool)
.await
.map_err(CiError::Database)?;
Ok(row.0)
}


@@ -1,73 +1,85 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::{CiError, Result};
use crate::models::{CreateWebhookConfig, WebhookConfig};
use crate::{
error::{CiError, Result},
models::{CreateWebhookConfig, WebhookConfig},
};
pub async fn create(
pool: &PgPool,
input: CreateWebhookConfig,
secret_hash: Option<&str>,
pool: &PgPool,
input: CreateWebhookConfig,
secret_hash: Option<&str>,
) -> Result<WebhookConfig> {
sqlx::query_as::<_, WebhookConfig>(
"INSERT INTO webhook_configs (project_id, forge_type, secret_hash) VALUES ($1, $2, $3) RETURNING *",
)
.bind(input.project_id)
.bind(&input.forge_type)
.bind(secret_hash)
.fetch_one(pool)
.await
.map_err(|e| match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Webhook config for forge '{}' already exists for this project",
input.forge_type
))
}
_ => CiError::Database(e),
})
sqlx::query_as::<_, WebhookConfig>(
"INSERT INTO webhook_configs (project_id, forge_type, secret_hash) VALUES \
($1, $2, $3) RETURNING *",
)
.bind(input.project_id)
.bind(&input.forge_type)
.bind(secret_hash)
.fetch_one(pool)
.await
.map_err(|e| {
match &e {
sqlx::Error::Database(db_err) if db_err.is_unique_violation() => {
CiError::Conflict(format!(
"Webhook config for forge '{}' already exists for this project",
input.forge_type
))
},
_ => CiError::Database(e),
}
})
}
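// Sketch of the unique-violation mapping above from a caller's perspective
// (illustrative helper, not part of the module): a duplicate
// (project, forge) pair surfaces as `CiError::Conflict`, which makes
// idempotent setup straightforward.
pub async fn create_if_absent(
  pool: &PgPool,
  input: CreateWebhookConfig,
  secret_hash: Option<&str>,
) -> Result<Option<WebhookConfig>> {
  match create(pool, input, secret_hash).await {
    Ok(cfg) => Ok(Some(cfg)),
    // Already configured for this forge on this project; treat as a no-op.
    Err(CiError::Conflict(_)) => Ok(None),
    Err(e) => Err(e),
  }
}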
pub async fn get(pool: &PgPool, id: Uuid) -> Result<WebhookConfig> {
sqlx::query_as::<_, WebhookConfig>("SELECT * FROM webhook_configs WHERE id = $1")
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Webhook config {id} not found")))
sqlx::query_as::<_, WebhookConfig>(
"SELECT * FROM webhook_configs WHERE id = $1",
)
.bind(id)
.fetch_optional(pool)
.await?
.ok_or_else(|| CiError::NotFound(format!("Webhook config {id} not found")))
}
pub async fn list_for_project(pool: &PgPool, project_id: Uuid) -> Result<Vec<WebhookConfig>> {
sqlx::query_as::<_, WebhookConfig>(
"SELECT * FROM webhook_configs WHERE project_id = $1 ORDER BY created_at DESC",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
pub async fn list_for_project(
pool: &PgPool,
project_id: Uuid,
) -> Result<Vec<WebhookConfig>> {
sqlx::query_as::<_, WebhookConfig>(
"SELECT * FROM webhook_configs WHERE project_id = $1 ORDER BY created_at \
DESC",
)
.bind(project_id)
.fetch_all(pool)
.await
.map_err(CiError::Database)
}
pub async fn get_by_project_and_forge(
pool: &PgPool,
project_id: Uuid,
forge_type: &str,
pool: &PgPool,
project_id: Uuid,
forge_type: &str,
) -> Result<Option<WebhookConfig>> {
sqlx::query_as::<_, WebhookConfig>(
"SELECT * FROM webhook_configs WHERE project_id = $1 AND forge_type = $2 AND enabled = true",
)
.bind(project_id)
.bind(forge_type)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
sqlx::query_as::<_, WebhookConfig>(
"SELECT * FROM webhook_configs WHERE project_id = $1 AND forge_type = $2 \
AND enabled = true",
)
.bind(project_id)
.bind(forge_type)
.fetch_optional(pool)
.await
.map_err(CiError::Database)
}
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM webhook_configs WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Webhook config {id} not found")));
}
Ok(())
let result = sqlx::query("DELETE FROM webhook_configs WHERE id = $1")
.bind(id)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(CiError::NotFound(format!("Webhook config {id} not found")));
}
Ok(())
}


@@ -1,7 +1,6 @@
//! Tracing initialization helper for all FC daemons.
use tracing_subscriber::EnvFilter;
use tracing_subscriber::fmt;
use tracing_subscriber::{EnvFilter, fmt};
use crate::config::TracingConfig;
@@ -10,42 +9,42 @@ use crate::config::TracingConfig;
/// Respects `RUST_LOG` environment variable as an override. If `RUST_LOG` is
/// not set, falls back to the configured level.
pub fn init_tracing(config: &TracingConfig) {
let env_filter =
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&config.level));
let env_filter = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new(&config.level));
match config.format.as_str() {
"json" => {
let builder = fmt()
.json()
.with_target(config.show_targets)
.with_env_filter(env_filter);
if config.show_timestamps {
builder.init();
} else {
builder.without_time().init();
}
}
"full" => {
let builder = fmt()
.with_target(config.show_targets)
.with_env_filter(env_filter);
if config.show_timestamps {
builder.init();
} else {
builder.without_time().init();
}
}
_ => {
// "compact" or any other value
let builder = fmt()
.compact()
.with_target(config.show_targets)
.with_env_filter(env_filter);
if config.show_timestamps {
builder.init();
} else {
builder.without_time().init();
}
}
}
match config.format.as_str() {
"json" => {
let builder = fmt()
.json()
.with_target(config.show_targets)
.with_env_filter(env_filter);
if config.show_timestamps {
builder.init();
} else {
builder.without_time().init();
}
},
"full" => {
let builder = fmt()
.with_target(config.show_targets)
.with_env_filter(env_filter);
if config.show_timestamps {
builder.init();
} else {
builder.without_time().init();
}
},
_ => {
// "compact" or any other value
let builder = fmt()
.compact()
.with_target(config.show_targets)
.with_env_filter(env_filter);
if config.show_timestamps {
builder.init();
} else {
builder.without_time().init();
}
},
}
}
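// Hedged usage sketch for `init_tracing`. The `TracingConfig` field names
// are inferred from the accesses above (`level`, `format`, `show_targets`,
// `show_timestamps`); the real struct may carry more fields, in which case
// this literal would need them too.
fn tracing_init_example() {
  let cfg = TracingConfig {
    level: "info".to_string(),
    // "json" and "full" are matched explicitly; anything else (including
    // "compact") falls through to the compact formatter.
    format: "compact".to_string(),
    show_targets: true,
    show_timestamps: true,
  };
  // If RUST_LOG is set, it overrides `cfg.level` through
  // `EnvFilter::try_from_default_env`.
  init_tracing(&cfg);
}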

File diff suppressed because it is too large


@@ -1,195 +1,207 @@
//! Database integration tests
use fc_common::config::DatabaseConfig;
use fc_common::*;
use fc_common::{config::DatabaseConfig, *};
use sqlx::PgPool;
#[tokio::test]
async fn test_database_connection() -> anyhow::Result<()> {
let config = DatabaseConfig {
url: "postgresql://postgres:password@localhost/test".to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
};
let config = DatabaseConfig {
url: "postgresql://postgres:password@localhost/test"
.to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
};
// Try to connect, skip test if database is not available
let db = match Database::new(config).await {
Ok(db) => db,
Err(e) => {
println!(
"Skipping test_database_connection: no PostgreSQL instance available - {}",
e
);
return Ok(());
}
};
// Try to connect, skip test if database is not available
let db = match Database::new(config).await {
Ok(db) => db,
Err(e) => {
println!(
"Skipping test_database_connection: no PostgreSQL instance available \
- {}",
e
);
return Ok(());
},
};
// Test health check
Database::health_check(db.pool()).await?;
// Test health check
Database::health_check(db.pool()).await?;
// Test connection info
let info = db.get_connection_info().await?;
assert!(!info.database.is_empty());
assert!(!info.user.is_empty());
assert!(!info.version.is_empty());
// Test connection info
let info = db.get_connection_info().await?;
assert!(!info.database.is_empty());
assert!(!info.user.is_empty());
assert!(!info.version.is_empty());
// Test pool stats
let stats = db.get_pool_stats().await;
assert!(stats.size >= 1);
// Test pool stats
let stats = db.get_pool_stats().await;
assert!(stats.size >= 1);
db.close().await;
db.close().await;
Ok(())
Ok(())
}
#[tokio::test]
async fn test_database_health_check() -> anyhow::Result<()> {
// Try to connect, skip test if database is not available
let pool = match PgPool::connect("postgresql://postgres:password@localhost/test").await {
Ok(pool) => pool,
Err(e) => {
println!(
"Skipping test_database_health_check: no PostgreSQL instance available - {}",
e
);
return Ok(());
}
};
// Try to connect, skip test if database is not available
let pool = match PgPool::connect(
"postgresql://postgres:password@localhost/test",
)
.await
{
Ok(pool) => pool,
Err(e) => {
println!(
"Skipping test_database_health_check: no PostgreSQL instance \
available - {}",
e
);
return Ok(());
},
};
// Should succeed
Database::health_check(&pool).await?;
// Should succeed
Database::health_check(&pool).await?;
pool.close().await;
Ok(())
pool.close().await;
Ok(())
}
#[tokio::test]
async fn test_connection_info() -> anyhow::Result<()> {
// Try to connect, skip test if database is not available
let pool = match PgPool::connect("postgresql://postgres:password@localhost/test").await {
Ok(pool) => pool,
Err(e) => {
println!(
"Skipping test_connection_info: no PostgreSQL instance available - {}",
e
);
return Ok(());
}
};
// Try to connect, skip test if database is not available
let pool = match PgPool::connect(
"postgresql://postgres:password@localhost/test",
)
.await
{
Ok(pool) => pool,
Err(e) => {
println!(
"Skipping test_connection_info: no PostgreSQL instance available - {}",
e
);
return Ok(());
},
};
let db = match Database::new(DatabaseConfig {
url: "postgresql://postgres:password@localhost/test".to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
})
.await
{
Ok(db) => db,
Err(e) => {
println!(
"Skipping test_connection_info: database connection failed - {}",
e
);
pool.close().await;
return Ok(());
}
};
let db = match Database::new(DatabaseConfig {
url: "postgresql://postgres:password@localhost/test"
.to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
})
.await
{
Ok(db) => db,
Err(e) => {
println!(
"Skipping test_connection_info: database connection failed - {}",
e
);
pool.close().await;
return Ok(());
},
};
let info = db.get_connection_info().await?;
let info = db.get_connection_info().await?;
assert!(!info.database.is_empty());
assert!(!info.user.is_empty());
assert!(!info.version.is_empty());
assert!(info.version.contains("PostgreSQL"));
assert!(!info.database.is_empty());
assert!(!info.user.is_empty());
assert!(!info.version.is_empty());
assert!(info.version.contains("PostgreSQL"));
db.close().await;
pool.close().await;
db.close().await;
pool.close().await;
Ok(())
Ok(())
}
#[tokio::test]
async fn test_pool_stats() -> anyhow::Result<()> {
let db = match Database::new(DatabaseConfig {
url: "postgresql://postgres:password@localhost/test".to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
})
.await
{
Ok(db) => db,
Err(e) => {
println!(
"Skipping test_pool_stats: no PostgreSQL instance available - {}",
e
);
return Ok(());
}
};
let db = match Database::new(DatabaseConfig {
url: "postgresql://postgres:password@localhost/test"
.to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
})
.await
{
Ok(db) => db,
Err(e) => {
println!(
"Skipping test_pool_stats: no PostgreSQL instance available - {}",
e
);
return Ok(());
},
};
let stats = db.get_pool_stats().await;
let stats = db.get_pool_stats().await;
assert!(stats.size >= 1);
assert!(stats.idle >= 1);
assert_eq!(stats.size, stats.idle + stats.active);
assert!(stats.size >= 1);
assert!(stats.idle >= 1);
assert_eq!(stats.size, stats.idle + stats.active);
db.close().await;
db.close().await;
Ok(())
Ok(())
}
#[sqlx::test]
async fn test_database_config_validation() -> anyhow::Result<()> {
// Valid config
let config = DatabaseConfig {
url: "postgresql://user:pass@localhost/db".to_string(),
max_connections: 10,
min_connections: 2,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
};
assert!(config.validate().is_ok());
// Valid config
let config = DatabaseConfig {
url: "postgresql://user:pass@localhost/db".to_string(),
max_connections: 10,
min_connections: 2,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
};
assert!(config.validate().is_ok());
// Invalid URL
let mut config = config.clone();
config.url = "invalid://url".to_string();
assert!(config.validate().is_err());
// Invalid URL
let mut config = config.clone();
config.url = "invalid://url".to_string();
assert!(config.validate().is_err());
// Empty URL
config.url = "".to_string();
assert!(config.validate().is_err());
// Empty URL
config.url = "".to_string();
assert!(config.validate().is_err());
// Zero max connections
config = DatabaseConfig {
url: "postgresql://user:pass@localhost/db".to_string(),
max_connections: 0,
min_connections: 1,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
};
assert!(config.validate().is_err());
// Zero max connections
config = DatabaseConfig {
url: "postgresql://user:pass@localhost/db".to_string(),
max_connections: 0,
min_connections: 1,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
};
assert!(config.validate().is_err());
// Min > max
config = DatabaseConfig {
url: "postgresql://user:pass@localhost/db".to_string(),
max_connections: 5,
min_connections: 10,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
};
assert!(config.validate().is_err());
// Min > max
config = DatabaseConfig {
url: "postgresql://user:pass@localhost/db".to_string(),
max_connections: 5,
min_connections: 10,
connect_timeout: 30,
idle_timeout: 600,
max_lifetime: 1800,
};
assert!(config.validate().is_err());
Ok(())
Ok(())
}


@@ -1,148 +1,151 @@
//! Integration tests for database and configuration
use fc_common::Database;
use fc_common::config::{Config, DatabaseConfig};
use fc_common::{
Database,
config::{Config, DatabaseConfig},
};
#[tokio::test]
async fn test_database_connection_full() -> anyhow::Result<()> {
// This test requires a running PostgreSQL instance
// Skip if no database is available
let config = DatabaseConfig {
url: "postgresql://postgres:password@localhost/fc_ci_test".to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
};
// This test requires a running PostgreSQL instance
// Skip if no database is available
let config = DatabaseConfig {
url: "postgresql://postgres:password@localhost/fc_ci_test"
.to_string(),
max_connections: 5,
min_connections: 1,
connect_timeout: 5, // Short timeout for test
idle_timeout: 600,
max_lifetime: 1800,
};
// Try to connect, skip test if database is not available
let db = match Database::new(config).await {
Ok(db) => db,
Err(_) => {
println!("Skipping database test: no PostgreSQL instance available");
return Ok(());
}
};
// Try to connect, skip test if database is not available
let db = match Database::new(config).await {
Ok(db) => db,
Err(_) => {
println!("Skipping database test: no PostgreSQL instance available");
return Ok(());
},
};
// Test health check
Database::health_check(db.pool()).await?;
// Test health check
Database::health_check(db.pool()).await?;
// Test connection info
let info = db.get_connection_info().await?;
assert!(!info.database.is_empty());
assert!(!info.user.is_empty());
assert!(!info.version.is_empty());
// Test connection info
let info = db.get_connection_info().await?;
assert!(!info.database.is_empty());
assert!(!info.user.is_empty());
assert!(!info.version.is_empty());
// Test pool stats
let stats = db.get_pool_stats().await;
assert!(stats.size >= 1);
assert!(stats.idle >= 1);
assert_eq!(stats.size, stats.idle + stats.active);
// Test pool stats
let stats = db.get_pool_stats().await;
assert!(stats.size >= 1);
assert!(stats.idle >= 1);
assert_eq!(stats.size, stats.idle + stats.active);
db.close().await;
db.close().await;
Ok(())
Ok(())
}
#[test]
fn test_config_loading() -> anyhow::Result<()> {
// Test default config loading
let config = Config::load()?;
assert!(config.validate().is_ok());
// Test default config loading
let config = Config::load()?;
assert!(config.validate().is_ok());
// Test that defaults are reasonable
assert_eq!(config.database.max_connections, 20);
assert_eq!(config.database.min_connections, 5);
assert_eq!(config.server.port, 3000);
assert_eq!(config.evaluator.poll_interval, 60);
assert_eq!(config.queue_runner.workers, 4);
// Test that defaults are reasonable
assert_eq!(config.database.max_connections, 20);
assert_eq!(config.database.min_connections, 5);
assert_eq!(config.server.port, 3000);
assert_eq!(config.evaluator.poll_interval, 60);
assert_eq!(config.queue_runner.workers, 4);
Ok(())
Ok(())
}
#[test]
fn test_config_validation() -> anyhow::Result<()> {
// Test valid config
let config = Config::default();
assert!(config.validate().is_ok());
// Test valid config
let config = Config::default();
assert!(config.validate().is_ok());
// Test invalid database URL
let mut config = config.clone();
config.database.url = "invalid://url".to_string();
assert!(config.validate().is_err());
// Test invalid database URL
let mut config = config.clone();
config.database.url = "invalid://url".to_string();
assert!(config.validate().is_err());
// Test invalid port
let mut config = config.clone();
config.server.port = 0;
assert!(config.validate().is_err());
// Test invalid port
let mut config = config.clone();
config.server.port = 0;
assert!(config.validate().is_err());
// Test invalid connections
let mut config = config.clone();
config.database.max_connections = 0;
assert!(config.validate().is_err());
// Test invalid connections
let mut config = config.clone();
config.database.max_connections = 0;
assert!(config.validate().is_err());
config.database.max_connections = 10;
config.database.min_connections = 15;
assert!(config.validate().is_err());
config.database.max_connections = 10;
config.database.min_connections = 15;
assert!(config.validate().is_err());
// Test invalid evaluator settings
let mut config = config.clone();
config.evaluator.poll_interval = 0;
assert!(config.validate().is_err());
// Test invalid evaluator settings
let mut config = config.clone();
config.evaluator.poll_interval = 0;
assert!(config.validate().is_err());
// Test invalid queue runner settings
let mut config = config.clone();
config.queue_runner.workers = 0;
assert!(config.validate().is_err());
// Test invalid queue runner settings
let mut config = config.clone();
config.queue_runner.workers = 0;
assert!(config.validate().is_err());
Ok(())
Ok(())
}
#[test]
fn test_database_config_validation() -> anyhow::Result<()> {
// Test valid config
let config = DatabaseConfig::default();
assert!(config.validate().is_ok());
// Test valid config
let config = DatabaseConfig::default();
assert!(config.validate().is_ok());
// Test invalid URL
let mut config = config.clone();
config.url = "invalid://url".to_string();
assert!(config.validate().is_err());
// Test invalid URL
let mut config = config.clone();
config.url = "invalid://url".to_string();
assert!(config.validate().is_err());
// Test empty URL
config.url = "".to_string();
assert!(config.validate().is_err());
// Test empty URL
config.url = "".to_string();
assert!(config.validate().is_err());
// Test zero max connections
config = DatabaseConfig::default();
config.max_connections = 0;
assert!(config.validate().is_err());
// Test zero max connections
config = DatabaseConfig::default();
config.max_connections = 0;
assert!(config.validate().is_err());
// Test min > max
config = DatabaseConfig::default();
config.max_connections = 5;
config.min_connections = 10;
assert!(config.validate().is_err());
// Test min > max
config = DatabaseConfig::default();
config.max_connections = 5;
config.min_connections = 10;
assert!(config.validate().is_err());
Ok(())
Ok(())
}
#[test]
fn test_config_serialization() -> anyhow::Result<()> {
let config = Config::default();
let config = Config::default();
// Test TOML serialization
let toml_str = toml::to_string_pretty(&config)?;
let parsed: Config = toml::from_str(&toml_str)?;
assert_eq!(config.database.url, parsed.database.url);
assert_eq!(config.server.port, parsed.server.port);
// Test TOML serialization
let toml_str = toml::to_string_pretty(&config)?;
let parsed: Config = toml::from_str(&toml_str)?;
assert_eq!(config.database.url, parsed.database.url);
assert_eq!(config.server.port, parsed.server.port);
// Test JSON serialization
let json_str = serde_json::to_string_pretty(&config)?;
let parsed: Config = serde_json::from_str(&json_str)?;
assert_eq!(config.database.url, parsed.database.url);
assert_eq!(config.server.port, parsed.server.port);
// Test JSON serialization
let json_str = serde_json::to_string_pretty(&config)?;
let parsed: Config = serde_json::from_str(&json_str)?;
assert_eq!(config.database.url, parsed.database.url);
assert_eq!(config.server.port, parsed.server.port);
Ok(())
Ok(())
}

File diff suppressed because it is too large


@@ -1,29 +1,29 @@
[package]
name = "fc-evaluator"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
name = "fc-evaluator"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
[dependencies]
tokio.workspace = true
sqlx.workspace = true
serde.workspace = true
serde_json.workspace = true
uuid.workspace = true
chrono.workspace = true
tracing.workspace = true
anyhow.workspace = true
chrono.workspace = true
clap.workspace = true
config.workspace = true
futures.workspace = true
git2.workspace = true
hex.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
sqlx.workspace = true
thiserror.workspace = true
tokio.workspace = true
toml.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
anyhow.workspace = true
thiserror.workspace = true
git2.workspace = true
clap.workspace = true
config.workspace = true
futures.workspace = true
toml.workspace = true
sha2.workspace = true
hex.workspace = true
uuid.workspace = true
# Our crates
fc-common.workspace = true


@@ -1,312 +1,345 @@
use std::collections::HashMap;
use std::time::Duration;
use std::{collections::HashMap, time::Duration};
use fc_common::{
config::EvaluatorConfig,
models::{CreateBuild, CreateEvaluation, EvaluationStatus, JobsetInput},
repo,
};
use futures::stream::{self, StreamExt};
use fc_common::config::EvaluatorConfig;
use fc_common::models::{CreateBuild, CreateEvaluation, EvaluationStatus, JobsetInput};
use fc_common::repo;
use sqlx::PgPool;
use uuid::Uuid;
pub async fn run(pool: PgPool, config: EvaluatorConfig) -> anyhow::Result<()> {
let poll_interval = Duration::from_secs(config.poll_interval);
let nix_timeout = Duration::from_secs(config.nix_timeout);
let git_timeout = Duration::from_secs(config.git_timeout);
let poll_interval = Duration::from_secs(config.poll_interval);
let nix_timeout = Duration::from_secs(config.nix_timeout);
let git_timeout = Duration::from_secs(config.git_timeout);
loop {
if let Err(e) = run_cycle(&pool, &config, nix_timeout, git_timeout).await {
tracing::error!("Evaluation cycle failed: {e}");
}
tokio::time::sleep(poll_interval).await;
loop {
if let Err(e) = run_cycle(&pool, &config, nix_timeout, git_timeout).await {
tracing::error!("Evaluation cycle failed: {e}");
}
tokio::time::sleep(poll_interval).await;
}
}
async fn run_cycle(
pool: &PgPool,
config: &EvaluatorConfig,
nix_timeout: Duration,
git_timeout: Duration,
pool: &PgPool,
config: &EvaluatorConfig,
nix_timeout: Duration,
git_timeout: Duration,
) -> anyhow::Result<()> {
let active = repo::jobsets::list_active(pool).await?;
tracing::info!("Found {} active jobsets", active.len());
let active = repo::jobsets::list_active(pool).await?;
tracing::info!("Found {} active jobsets", active.len());
let max_concurrent = config.max_concurrent_evals;
let max_concurrent = config.max_concurrent_evals;
stream::iter(active)
.for_each_concurrent(max_concurrent, |jobset| async move {
if let Err(e) = evaluate_jobset(pool, &jobset, config, nix_timeout, git_timeout).await {
tracing::error!(
jobset_id = %jobset.id,
jobset_name = %jobset.name,
"Failed to evaluate jobset: {e}"
);
}
})
.await;
stream::iter(active)
.for_each_concurrent(max_concurrent, |jobset| {
async move {
if let Err(e) =
evaluate_jobset(pool, &jobset, config, nix_timeout, git_timeout).await
{
tracing::error!(
jobset_id = %jobset.id,
jobset_name = %jobset.name,
"Failed to evaluate jobset: {e}"
);
}
}
})
.await;
Ok(())
Ok(())
}
async fn evaluate_jobset(
pool: &PgPool,
jobset: &fc_common::models::ActiveJobset,
config: &EvaluatorConfig,
nix_timeout: Duration,
git_timeout: Duration,
pool: &PgPool,
jobset: &fc_common::models::ActiveJobset,
config: &EvaluatorConfig,
nix_timeout: Duration,
git_timeout: Duration,
) -> anyhow::Result<()> {
let url = jobset.repository_url.clone();
let work_dir = config.work_dir.clone();
let project_name = jobset.project_name.clone();
let branch = jobset.branch.clone();
let url = jobset.repository_url.clone();
let work_dir = config.work_dir.clone();
let project_name = jobset.project_name.clone();
let branch = jobset.branch.clone();
// Clone/fetch in a blocking task (git2 is sync) with timeout
let (repo_path, commit_hash) = tokio::time::timeout(
git_timeout,
tokio::task::spawn_blocking(move || {
crate::git::clone_or_fetch(&url, &work_dir, &project_name, branch.as_deref())
}),
)
// Clone/fetch in a blocking task (git2 is sync) with timeout
let (repo_path, commit_hash) = tokio::time::timeout(
git_timeout,
tokio::task::spawn_blocking(move || {
crate::git::clone_or_fetch(
&url,
&work_dir,
&project_name,
branch.as_deref(),
)
}),
)
.await
.map_err(|_| {
anyhow::anyhow!("Git operation timed out after {git_timeout:?}")
})???;
// Query jobset inputs
let inputs = repo::jobset_inputs::list_for_jobset(pool, jobset.id)
.await
.map_err(|_| anyhow::anyhow!("Git operation timed out after {git_timeout:?}"))???;
.unwrap_or_default();
// Query jobset inputs
let inputs = repo::jobset_inputs::list_for_jobset(pool, jobset.id)
.await
.unwrap_or_default();
// Compute inputs hash for eval caching (commit + all input values/revisions)
let inputs_hash = compute_inputs_hash(&commit_hash, &inputs);
// Compute inputs hash for eval caching (commit + all input values/revisions)
let inputs_hash = compute_inputs_hash(&commit_hash, &inputs);
// Check if this exact combination was already evaluated (eval caching)
if let Ok(Some(cached)) =
repo::evaluations::get_by_inputs_hash(pool, jobset.id, &inputs_hash).await
{
tracing::debug!(
jobset = %jobset.name,
commit = %commit_hash,
cached_eval = %cached.id,
"Inputs unchanged (hash: {}), skipping evaluation",
&inputs_hash[..16],
);
return Ok(());
}
// Also skip if commit hasn't changed (backward compat)
if let Some(latest) = repo::evaluations::get_latest(pool, jobset.id).await?
&& latest.commit_hash == commit_hash && latest.inputs_hash.as_deref() == Some(&inputs_hash)
{
tracing::debug!(
jobset = %jobset.name,
commit = %commit_hash,
"Already evaluated, skipping"
);
return Ok(());
}
tracing::info!(
// Check if this exact combination was already evaluated (eval caching)
if let Ok(Some(cached)) =
repo::evaluations::get_by_inputs_hash(pool, jobset.id, &inputs_hash).await
{
tracing::debug!(
jobset = %jobset.name,
commit = %commit_hash,
"Starting evaluation"
cached_eval = %cached.id,
"Inputs unchanged (hash: {}), skipping evaluation",
&inputs_hash[..16],
);
return Ok(());
}
// Create evaluation record
let eval = repo::evaluations::create(
// Also skip if commit hasn't changed (backward compat)
if let Some(latest) = repo::evaluations::get_latest(pool, jobset.id).await?
&& latest.commit_hash == commit_hash
&& latest.inputs_hash.as_deref() == Some(&inputs_hash)
{
tracing::debug!(
jobset = %jobset.name,
commit = %commit_hash,
"Already evaluated, skipping"
);
return Ok(());
}
tracing::info!(
jobset = %jobset.name,
commit = %commit_hash,
"Starting evaluation"
);
// Create evaluation record
let eval = repo::evaluations::create(pool, CreateEvaluation {
jobset_id: jobset.id,
commit_hash: commit_hash.clone(),
})
.await?;
// Mark as running and set inputs hash
repo::evaluations::update_status(
pool,
eval.id,
EvaluationStatus::Running,
None,
)
.await?;
let _ = repo::evaluations::set_inputs_hash(pool, eval.id, &inputs_hash).await;
// Check for declarative config in repo
check_declarative_config(pool, &repo_path, jobset.project_id).await;
// Run nix evaluation
match crate::nix::evaluate(
&repo_path,
&jobset.nix_expression,
jobset.flake_mode,
nix_timeout,
config,
&inputs,
)
.await
{
Ok(eval_result) => {
tracing::info!(
jobset = %jobset.name,
count = eval_result.jobs.len(),
errors = eval_result.error_count,
"Evaluation discovered jobs"
);
// Create build records, tracking drv_path -> build_id for dependency
// resolution
let mut drv_to_build: HashMap<String, Uuid> = HashMap::new();
let mut name_to_build: HashMap<String, Uuid> = HashMap::new();
for job in &eval_result.jobs {
let outputs_json = job
.outputs
.as_ref()
.map(|o| serde_json::to_value(o).unwrap_or_default());
let constituents_json = job
.constituents
.as_ref()
.map(|c| serde_json::to_value(c).unwrap_or_default());
let is_aggregate = job.constituents.is_some();
let build = repo::builds::create(pool, CreateBuild {
evaluation_id: eval.id,
job_name: job.name.clone(),
drv_path: job.drv_path.clone(),
system: job.system.clone(),
outputs: outputs_json,
is_aggregate: Some(is_aggregate),
constituents: constituents_json,
})
.await?;
drv_to_build.insert(job.drv_path.clone(), build.id);
name_to_build.insert(job.name.clone(), build.id);
}
// Resolve dependencies
for job in &eval_result.jobs {
let build_id = match drv_to_build.get(&job.drv_path) {
Some(id) => *id,
None => continue,
};
// Input derivation dependencies
if let Some(ref input_drvs) = job.input_drvs {
for dep_drv in input_drvs.keys() {
if let Some(&dep_build_id) = drv_to_build.get(dep_drv)
&& dep_build_id != build_id
{
let _ =
repo::build_dependencies::create(pool, build_id, dep_build_id)
.await;
}
}
}
// Aggregate constituent dependencies
if let Some(ref constituents) = job.constituents {
for constituent_name in constituents {
if let Some(&dep_build_id) = name_to_build.get(constituent_name)
&& dep_build_id != build_id
{
let _ =
repo::build_dependencies::create(pool, build_id, dep_build_id)
.await;
}
}
}
}
repo::evaluations::update_status(
pool,
CreateEvaluation {
jobset_id: jobset.id,
commit_hash: commit_hash.clone(),
},
)
.await?;
eval.id,
EvaluationStatus::Completed,
None,
)
.await?;
},
Err(e) => {
let msg = e.to_string();
tracing::error!(jobset = %jobset.name, "Evaluation failed: {msg}");
repo::evaluations::update_status(
pool,
eval.id,
EvaluationStatus::Failed,
Some(&msg),
)
.await?;
},
}
// Mark as running and set inputs hash
repo::evaluations::update_status(pool, eval.id, EvaluationStatus::Running, None).await?;
let _ = repo::evaluations::set_inputs_hash(pool, eval.id, &inputs_hash).await;
// Check for declarative config in repo
check_declarative_config(pool, &repo_path, jobset.project_id).await;
// Run nix evaluation
match crate::nix::evaluate(
&repo_path,
&jobset.nix_expression,
jobset.flake_mode,
nix_timeout,
config,
&inputs,
)
.await
{
Ok(eval_result) => {
tracing::info!(
jobset = %jobset.name,
count = eval_result.jobs.len(),
errors = eval_result.error_count,
"Evaluation discovered jobs"
);
// Create build records, tracking drv_path -> build_id for dependency resolution
let mut drv_to_build: HashMap<String, Uuid> = HashMap::new();
let mut name_to_build: HashMap<String, Uuid> = HashMap::new();
for job in &eval_result.jobs {
let outputs_json = job
.outputs
.as_ref()
.map(|o| serde_json::to_value(o).unwrap_or_default());
let constituents_json = job
.constituents
.as_ref()
.map(|c| serde_json::to_value(c).unwrap_or_default());
let is_aggregate = job.constituents.is_some();
let build = repo::builds::create(
pool,
CreateBuild {
evaluation_id: eval.id,
job_name: job.name.clone(),
drv_path: job.drv_path.clone(),
system: job.system.clone(),
outputs: outputs_json,
is_aggregate: Some(is_aggregate),
constituents: constituents_json,
},
)
.await?;
drv_to_build.insert(job.drv_path.clone(), build.id);
name_to_build.insert(job.name.clone(), build.id);
}
// Resolve dependencies
for job in &eval_result.jobs {
let build_id = match drv_to_build.get(&job.drv_path) {
Some(id) => *id,
None => continue,
};
// Input derivation dependencies
if let Some(ref input_drvs) = job.input_drvs {
for dep_drv in input_drvs.keys() {
if let Some(&dep_build_id) = drv_to_build.get(dep_drv)
&& dep_build_id != build_id {
let _ =
repo::build_dependencies::create(pool, build_id, dep_build_id)
.await;
}
}
}
// Aggregate constituent dependencies
if let Some(ref constituents) = job.constituents {
for constituent_name in constituents {
if let Some(&dep_build_id) = name_to_build.get(constituent_name)
&& dep_build_id != build_id {
let _ =
repo::build_dependencies::create(pool, build_id, dep_build_id)
.await;
}
}
}
}
repo::evaluations::update_status(pool, eval.id, EvaluationStatus::Completed, None)
.await?;
}
Err(e) => {
let msg = e.to_string();
tracing::error!(jobset = %jobset.name, "Evaluation failed: {msg}");
repo::evaluations::update_status(pool, eval.id, EvaluationStatus::Failed, Some(&msg))
.await?;
}
}
Ok(())
Ok(())
}
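// Minimal self-contained sketch of the two-pass dependency wiring above:
// first map drv_path -> id, then emit edges while skipping self-references.
// Plain tuples stand in for `NixJob` and the database rows.
fn dependency_edges_sketch() -> Vec<(usize, usize)> {
  use std::collections::HashMap;

  // (job name, drv_path, input drv_paths)
  let jobs = [
    ("app", "/nix/store/app.drv", vec!["/nix/store/lib.drv"]),
    ("lib", "/nix/store/lib.drv", vec![]),
  ];
  let ids: HashMap<&str, usize> =
    jobs.iter().enumerate().map(|(i, j)| (j.1, i)).collect();
  let mut edges = Vec::new();
  for (i, job) in jobs.iter().enumerate() {
    for dep in &job.2 {
      if let Some(&d) = ids.get(dep)
        && d != i
      {
        // Mirrors repo::build_dependencies::create(pool, build_id, dep_id).
        edges.push((i, d));
      }
    }
  }
  edges // [(0, 1)]: "app" depends on "lib"
}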
/// Compute a deterministic hash over the commit and all jobset inputs.
/// Used for evaluation caching — skip re-eval when inputs haven't changed.
fn compute_inputs_hash(commit_hash: &str, inputs: &[JobsetInput]) -> String {
use sha2::{Digest, Sha256};
use sha2::{Digest, Sha256};
let mut hasher = Sha256::new();
hasher.update(commit_hash.as_bytes());
let mut hasher = Sha256::new();
hasher.update(commit_hash.as_bytes());
// Sort inputs by name for deterministic hashing
let mut sorted_inputs: Vec<&JobsetInput> = inputs.iter().collect();
sorted_inputs.sort_by_key(|i| &i.name);
// Sort inputs by name for deterministic hashing
let mut sorted_inputs: Vec<&JobsetInput> = inputs.iter().collect();
sorted_inputs.sort_by_key(|i| &i.name);
for input in sorted_inputs {
hasher.update(input.name.as_bytes());
hasher.update(input.input_type.as_bytes());
hasher.update(input.value.as_bytes());
if let Some(ref rev) = input.revision {
hasher.update(rev.as_bytes());
}
for input in sorted_inputs {
hasher.update(input.name.as_bytes());
hasher.update(input.input_type.as_bytes());
hasher.update(input.value.as_bytes());
if let Some(ref rev) = input.revision {
hasher.update(rev.as_bytes());
}
}
hex::encode(hasher.finalize())
hex::encode(hasher.finalize())
}
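// Self-contained sketch of the determinism argument: hashing sorted
// (name, type, value, revision) tuples yields the same digest for any
// incoming order, which is exactly why `compute_inputs_hash` sorts by name
// before feeding the hasher.
fn sorted_inputs_digest(
  commit: &str,
  mut inputs: Vec<(&str, &str, &str, Option<&str>)>,
) -> String {
  use sha2::{Digest, Sha256};

  inputs.sort_by_key(|i| i.0);
  let mut hasher = Sha256::new();
  hasher.update(commit.as_bytes());
  for (name, input_type, value, revision) in inputs {
    hasher.update(name.as_bytes());
    hasher.update(input_type.as_bytes());
    hasher.update(value.as_bytes());
    if let Some(rev) = revision {
      hasher.update(rev.as_bytes());
    }
  }
  hex::encode(hasher.finalize())
}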
/// Check for declarative project config (.fc.toml or .fc/config.toml) in the repo.
async fn check_declarative_config(pool: &PgPool, repo_path: &std::path::Path, project_id: Uuid) {
let config_path = repo_path.join(".fc.toml");
let alt_config_path = repo_path.join(".fc/config.toml");
/// Check for declarative project config (.fc.toml or .fc/config.toml) in the
/// repo.
async fn check_declarative_config(
pool: &PgPool,
repo_path: &std::path::Path,
project_id: Uuid,
) {
let config_path = repo_path.join(".fc.toml");
let alt_config_path = repo_path.join(".fc/config.toml");
let path = if config_path.exists() {
config_path
} else if alt_config_path.exists() {
alt_config_path
} else {
return;
};
let path = if config_path.exists() {
config_path
} else if alt_config_path.exists() {
alt_config_path
} else {
return;
};
let content = match std::fs::read_to_string(&path) {
Ok(c) => c,
Err(e) => {
tracing::warn!("Failed to read declarative config {}: {e}", path.display());
return;
}
};
let content = match std::fs::read_to_string(&path) {
Ok(c) => c,
Err(e) => {
tracing::warn!(
"Failed to read declarative config {}: {e}",
path.display()
);
return;
},
};
#[derive(serde::Deserialize)]
struct DeclarativeConfig {
jobsets: Option<Vec<DeclarativeJobset>>,
}
#[derive(serde::Deserialize)]
struct DeclarativeJobset {
name: String,
nix_expression: String,
flake_mode: Option<bool>,
check_interval: Option<i32>,
enabled: Option<bool>,
}
let config: DeclarativeConfig = match toml::from_str(&content) {
Ok(c) => c,
Err(e) => {
tracing::warn!("Failed to parse declarative config: {e}");
return;
}
};
if let Some(jobsets) = config.jobsets {
for js in jobsets {
let input = fc_common::models::CreateJobset {
project_id,
name: js.name,
nix_expression: js.nix_expression,
enabled: js.enabled,
flake_mode: js.flake_mode,
check_interval: js.check_interval,
branch: None,
scheduling_shares: None,
};
if let Err(e) = repo::jobsets::upsert(pool, input).await {
tracing::warn!("Failed to upsert declarative jobset: {e}");
}
}
#[derive(serde::Deserialize)]
struct DeclarativeConfig {
jobsets: Option<Vec<DeclarativeJobset>>,
}
#[derive(serde::Deserialize)]
struct DeclarativeJobset {
name: String,
nix_expression: String,
flake_mode: Option<bool>,
check_interval: Option<i32>,
enabled: Option<bool>,
}
let config: DeclarativeConfig = match toml::from_str(&content) {
Ok(c) => c,
Err(e) => {
tracing::warn!("Failed to parse declarative config: {e}");
return;
},
};
if let Some(jobsets) = config.jobsets {
for js in jobsets {
let input = fc_common::models::CreateJobset {
project_id,
name: js.name,
nix_expression: js.nix_expression,
enabled: js.enabled,
flake_mode: js.flake_mode,
check_interval: js.check_interval,
branch: None,
scheduling_shares: None,
};
if let Err(e) = repo::jobsets::upsert(pool, input).await {
tracing::warn!("Failed to upsert declarative jobset: {e}");
}
}
}
}
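// Hedged sketch of a declarative config the parser above accepts, embedded
// as a Rust string so the example stays in this file's language; the local
// structs mirror the private ones in `check_declarative_config`.
fn declarative_config_sketch() {
  #[derive(serde::Deserialize)]
  struct Cfg {
    jobsets: Option<Vec<Js>>,
  }
  #[derive(serde::Deserialize)]
  #[allow(dead_code)]
  struct Js {
    name: String,
    nix_expression: String,
    flake_mode: Option<bool>,
    check_interval: Option<i32>,
    enabled: Option<bool>,
  }

  let example = r#"
    [[jobsets]]
    name = "nightly"
    nix_expression = "hydraJobs"
    flake_mode = true
    check_interval = 3600
    enabled = true
  "#;
  let cfg: Cfg = toml::from_str(example).expect("example config parses");
  assert_eq!(cfg.jobsets.unwrap()[0].name, "nightly");
}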


@@ -5,43 +5,45 @@ use git2::Repository;
/// Clone or fetch a repository. Returns (repo_path, commit_hash).
///
/// If `branch` is `Some`, resolve `refs/remotes/origin/<branch>` instead of HEAD.
/// If `branch` is `Some`, resolve `refs/remotes/origin/<branch>` instead of
/// HEAD.
#[tracing::instrument(skip(work_dir))]
pub fn clone_or_fetch(
url: &str,
work_dir: &Path,
project_name: &str,
branch: Option<&str>,
url: &str,
work_dir: &Path,
project_name: &str,
branch: Option<&str>,
) -> Result<(PathBuf, String)> {
let repo_path = work_dir.join(project_name);
let repo_path = work_dir.join(project_name);
let repo = if repo_path.exists() {
let repo = Repository::open(&repo_path)?;
// Fetch origin — scope the borrow so `remote` is dropped before we move `repo`
{
let mut remote = repo.find_remote("origin")?;
remote.fetch(&["refs/heads/*:refs/remotes/origin/*"], None, None)?;
}
repo
} else {
Repository::clone(url, &repo_path)?
};
let repo = if repo_path.exists() {
let repo = Repository::open(&repo_path)?;
// Fetch origin — scope the borrow so `remote` is dropped before we move
// `repo`
{
let mut remote = repo.find_remote("origin")?;
remote.fetch(&["refs/heads/*:refs/remotes/origin/*"], None, None)?;
}
repo
} else {
Repository::clone(url, &repo_path)?
};
// Resolve commit: use specific branch ref or fall back to HEAD
let hash = if let Some(branch_name) = branch {
let refname = format!("refs/remotes/origin/{branch_name}");
let reference = repo.find_reference(&refname).map_err(|e| {
fc_common::error::CiError::NotFound(format!(
"Branch '{branch_name}' not found ({refname}): {e}"
))
})?;
let commit = reference.peel_to_commit()?;
commit.id().to_string()
} else {
let head = repo.head()?;
let commit = head.peel_to_commit()?;
commit.id().to_string()
};
// Resolve commit: use specific branch ref or fall back to HEAD
let hash = if let Some(branch_name) = branch {
let refname = format!("refs/remotes/origin/{branch_name}");
let reference = repo.find_reference(&refname).map_err(|e| {
fc_common::error::CiError::NotFound(format!(
"Branch '{branch_name}' not found ({refname}): {e}"
))
})?;
let commit = reference.peel_to_commit()?;
commit.id().to_string()
} else {
let head = repo.head()?;
let commit = head.peel_to_commit()?;
commit.id().to_string()
};
Ok((repo_path, hash))
Ok((repo_path, hash))
}
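// Hedged usage sketch for `clone_or_fetch`; the URL and paths below are
// placeholders, not values from this repository.
fn clone_or_fetch_example() -> Result<()> {
  let (repo_path, commit) = clone_or_fetch(
    "https://example.com/org/repo.git",
    Path::new("/var/lib/fc/work"),
    "repo",
    Some("main"), // None resolves HEAD instead of refs/remotes/origin/main
  )?;
  tracing::info!("checked out {commit} at {}", repo_path.display());
  Ok(())
}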


@@ -5,67 +5,67 @@
#[command(name = "fc-evaluator")]
#[command(about = "CI Evaluator - Git polling and Nix evaluation")]
struct Cli {
#[arg(short, long)]
config: Option<String>,
#[arg(short, long)]
config: Option<String>,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let _cli = Cli::parse();
let _cli = Cli::parse();
let config = Config::load()?;
fc_common::init_tracing(&config.tracing);
let config = Config::load()?;
fc_common::init_tracing(&config.tracing);
tracing::info!("Starting CI Evaluator");
tracing::info!("Configuration loaded");
tracing::info!("Starting CI Evaluator");
tracing::info!("Configuration loaded");
// Ensure work directory exists
tokio::fs::create_dir_all(&config.evaluator.work_dir).await?;
tracing::info!(work_dir = %config.evaluator.work_dir.display(), "Work directory ready");
// Ensure work directory exists
tokio::fs::create_dir_all(&config.evaluator.work_dir).await?;
tracing::info!(work_dir = %config.evaluator.work_dir.display(), "Work directory ready");
let db = Database::new(config.database.clone()).await?;
tracing::info!("Database connection established");
let db = Database::new(config.database.clone()).await?;
tracing::info!("Database connection established");
let pool = db.pool().clone();
let eval_config = config.evaluator;
let pool = db.pool().clone();
let eval_config = config.evaluator;
tokio::select! {
result = fc_evaluator::eval_loop::run(pool, eval_config) => {
if let Err(e) = result {
tracing::error!("Evaluator loop failed: {e}");
}
}
() = shutdown_signal() => {
tracing::info!("Shutdown signal received");
}
}
tokio::select! {
result = fc_evaluator::eval_loop::run(pool, eval_config) => {
if let Err(e) = result {
tracing::error!("Evaluator loop failed: {e}");
}
}
() = shutdown_signal() => {
tracing::info!("Shutdown signal received");
}
}
tracing::info!("Evaluator shutting down, closing database pool");
db.close().await;
tracing::info!("Evaluator shutting down, closing database pool");
db.close().await;
Ok(())
Ok(())
}
async fn shutdown_signal() {
let ctrl_c = async {
tokio::signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
let ctrl_c = async {
tokio::signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
let terminate = async {
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
.expect("failed to install SIGTERM handler")
.recv()
.await;
};
#[cfg(unix)]
let terminate = async {
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
.expect("failed to install SIGTERM handler")
.recv()
.await;
};
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
tokio::select! {
() = ctrl_c => {},
() = terminate => {},
}
tokio::select! {
() = ctrl_c => {},
() = terminate => {},
}
}


@@ -1,73 +1,74 @@
use std::collections::HashMap;
use std::path::Path;
use std::time::Duration;
use std::{collections::HashMap, path::Path, time::Duration};
use fc_common::CiError;
use fc_common::config::EvaluatorConfig;
use fc_common::error::Result;
use fc_common::models::JobsetInput;
use fc_common::{
CiError,
config::EvaluatorConfig,
error::Result,
models::JobsetInput,
};
use serde::Deserialize;
#[derive(Debug, Clone, Deserialize)]
pub struct NixJob {
pub name: String,
#[serde(alias = "drvPath")]
pub drv_path: String,
pub system: Option<String>,
pub outputs: Option<HashMap<String, String>>,
#[serde(alias = "inputDrvs")]
pub input_drvs: Option<HashMap<String, serde_json::Value>>,
pub constituents: Option<Vec<String>>,
pub name: String,
#[serde(alias = "drvPath")]
pub drv_path: String,
pub system: Option<String>,
pub outputs: Option<HashMap<String, String>>,
#[serde(alias = "inputDrvs")]
pub input_drvs: Option<HashMap<String, serde_json::Value>>,
pub constituents: Option<Vec<String>>,
}
/// An error reported by nix-eval-jobs for a single job.
#[derive(Debug, Clone, Deserialize)]
struct NixEvalError {
#[serde(alias = "attr")]
name: Option<String>,
error: String,
#[serde(alias = "attr")]
name: Option<String>,
error: String,
}
/// Result of evaluating nix expressions.
pub struct EvalResult {
pub jobs: Vec<NixJob>,
pub error_count: usize,
pub jobs: Vec<NixJob>,
pub error_count: usize,
}
/// Parse nix-eval-jobs output lines into jobs and error counts.
/// Extracted as a testable function from the inline parsing loops.
pub fn parse_eval_output(stdout: &str) -> EvalResult {
let mut jobs = Vec::new();
let mut error_count = 0;
let mut jobs = Vec::new();
let mut error_count = 0;
for line in stdout.lines() {
if line.trim().is_empty() {
continue;
}
if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(line)
&& parsed.get("error").is_some() {
if let Ok(eval_err) = serde_json::from_str::<NixEvalError>(line) {
let name = eval_err.name.as_deref().unwrap_or("<unknown>");
tracing::warn!(
job = name,
"nix-eval-jobs reported error: {}",
eval_err.error
);
error_count += 1;
}
continue;
}
match serde_json::from_str::<NixJob>(line) {
Ok(job) => jobs.push(job),
Err(e) => {
tracing::warn!("Failed to parse nix-eval-jobs line: {e}");
}
}
for line in stdout.lines() {
if line.trim().is_empty() {
continue;
}
EvalResult { jobs, error_count }
if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(line)
&& parsed.get("error").is_some()
{
if let Ok(eval_err) = serde_json::from_str::<NixEvalError>(line) {
let name = eval_err.name.as_deref().unwrap_or("<unknown>");
tracing::warn!(
job = name,
"nix-eval-jobs reported error: {}",
eval_err.error
);
error_count += 1;
}
continue;
}
match serde_json::from_str::<NixJob>(line) {
Ok(job) => jobs.push(job),
Err(e) => {
tracing::warn!("Failed to parse nix-eval-jobs line: {e}");
},
}
}
EvalResult { jobs, error_count }
}
/// Evaluate nix expressions and return discovered jobs.
@@ -75,214 +76,229 @@ pub fn parse_eval_output(stdout: &str) -> EvalResult {
/// If flake_mode is false, evaluates a legacy expression file.
#[tracing::instrument(skip(config, inputs), fields(flake_mode, nix_expression))]
pub async fn evaluate(
repo_path: &Path,
nix_expression: &str,
flake_mode: bool,
timeout: Duration,
config: &EvaluatorConfig,
inputs: &[JobsetInput],
repo_path: &Path,
nix_expression: &str,
flake_mode: bool,
timeout: Duration,
config: &EvaluatorConfig,
inputs: &[JobsetInput],
) -> Result<EvalResult> {
if flake_mode {
evaluate_flake(repo_path, nix_expression, timeout, config, inputs).await
} else {
evaluate_legacy(repo_path, nix_expression, timeout, config, inputs).await
}
if flake_mode {
evaluate_flake(repo_path, nix_expression, timeout, config, inputs).await
} else {
evaluate_legacy(repo_path, nix_expression, timeout, config, inputs).await
}
}
#[tracing::instrument(skip(config, inputs))]
async fn evaluate_flake(
repo_path: &Path,
nix_expression: &str,
timeout: Duration,
config: &EvaluatorConfig,
inputs: &[JobsetInput],
repo_path: &Path,
nix_expression: &str,
timeout: Duration,
config: &EvaluatorConfig,
inputs: &[JobsetInput],
) -> Result<EvalResult> {
let flake_ref = format!("{}#{}", repo_path.display(), nix_expression);
let flake_ref = format!("{}#{}", repo_path.display(), nix_expression);
tokio::time::timeout(timeout, async {
let mut cmd = tokio::process::Command::new("nix-eval-jobs");
cmd.arg("--flake").arg(&flake_ref);
tokio::time::timeout(timeout, async {
let mut cmd = tokio::process::Command::new("nix-eval-jobs");
cmd.arg("--flake").arg(&flake_ref);
if config.restrict_eval {
cmd.args(["--option", "restrict-eval", "true"]);
}
if !config.allow_ifd {
cmd.args(["--option", "allow-import-from-derivation", "false"]);
}
for input in inputs {
if input.input_type == "git" {
cmd.args(["--override-input", &input.name, &input.value]);
}
}
if config.restrict_eval {
cmd.args(["--option", "restrict-eval", "true"]);
}
if !config.allow_ifd {
cmd.args(["--option", "allow-import-from-derivation", "false"]);
}
for input in inputs {
if input.input_type == "git" {
cmd.args(["--override-input", &input.name, &input.value]);
}
let output = cmd.output().await;
match output {
Ok(out) if out.status.success() || !out.stdout.is_empty() => {
let stdout = String::from_utf8_lossy(&out.stdout);
let result = parse_eval_output(&stdout);
if result.error_count > 0 {
tracing::warn!(
error_count = result.error_count,
"nix-eval-jobs reported errors for some jobs"
);
}
let output = cmd.output().await;
match output {
Ok(out) if out.status.success() || !out.stdout.is_empty() => {
let stdout = String::from_utf8_lossy(&out.stdout);
let result = parse_eval_output(&stdout);
if result.error_count > 0 {
tracing::warn!(
error_count = result.error_count,
"nix-eval-jobs reported errors for some jobs"
);
}
Ok(result)
}
_ => {
tracing::info!("nix-eval-jobs unavailable, falling back to nix eval");
let jobs = evaluate_with_nix_eval(repo_path, nix_expression).await?;
Ok(EvalResult {
jobs,
error_count: 0,
})
}
}
})
.await
.map_err(|_| CiError::Timeout(format!("Nix evaluation timed out after {timeout:?}")))?
Ok(result)
},
_ => {
tracing::info!("nix-eval-jobs unavailable, falling back to nix eval");
let jobs = evaluate_with_nix_eval(repo_path, nix_expression).await?;
Ok(EvalResult {
jobs,
error_count: 0,
})
},
}
})
.await
.map_err(|_| {
CiError::Timeout(format!("Nix evaluation timed out after {timeout:?}"))
})?
}
/// Legacy (non-flake) evaluation: import the nix expression file and evaluate it.
/// Legacy (non-flake) evaluation: import the nix expression file and evaluate
/// it.
#[tracing::instrument(skip(config, inputs))]
async fn evaluate_legacy(
repo_path: &Path,
nix_expression: &str,
timeout: Duration,
config: &EvaluatorConfig,
inputs: &[JobsetInput],
repo_path: &Path,
nix_expression: &str,
timeout: Duration,
config: &EvaluatorConfig,
inputs: &[JobsetInput],
) -> Result<EvalResult> {
let expr_path = repo_path.join(nix_expression);
let expr_path = repo_path.join(nix_expression);
tokio::time::timeout(timeout, async {
// Try nix-eval-jobs without --flake for legacy expressions
let mut cmd = tokio::process::Command::new("nix-eval-jobs");
cmd.arg(&expr_path);
tokio::time::timeout(timeout, async {
// Try nix-eval-jobs without --flake for legacy expressions
let mut cmd = tokio::process::Command::new("nix-eval-jobs");
cmd.arg(&expr_path);
if config.restrict_eval {
cmd.args(["--option", "restrict-eval", "true"]);
}
if !config.allow_ifd {
cmd.args(["--option", "allow-import-from-derivation", "false"]);
}
for input in inputs {
if input.input_type == "string" || input.input_type == "path" {
cmd.args(["--arg", &input.name, &input.value]);
}
}
if config.restrict_eval {
cmd.args(["--option", "restrict-eval", "true"]);
let output = cmd.output().await;
match output {
Ok(out) if out.status.success() || !out.stdout.is_empty() => {
let stdout = String::from_utf8_lossy(&out.stdout);
Ok(parse_eval_output(&stdout))
},
_ => {
// Fallback: nix eval on the legacy import
tracing::info!(
"nix-eval-jobs unavailable for legacy expr, using nix-instantiate"
);
let output = tokio::process::Command::new("nix-instantiate")
.arg(&expr_path)
.arg("--strict")
.arg("--json")
.output()
.await
.map_err(|e| {
CiError::NixEval(format!("nix-instantiate failed: {e}"))
})?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(CiError::NixEval(format!(
"nix-instantiate failed: {stderr}"
)));
}
if !config.allow_ifd {
cmd.args(["--option", "allow-import-from-derivation", "false"]);
}
for input in inputs {
if input.input_type == "string" || input.input_type == "path" {
cmd.args(["--arg", &input.name, &input.value]);
let stdout = String::from_utf8_lossy(&output.stdout);
// nix-instantiate --json outputs the derivation path(s)
let drv_paths: Vec<String> =
serde_json::from_str(&stdout).unwrap_or_default();
let jobs: Vec<NixJob> = drv_paths
.into_iter()
.enumerate()
.map(|(i, drv_path)| {
NixJob {
name: format!("job-{i}"),
drv_path,
system: None,
outputs: None,
input_drvs: None,
constituents: None,
}
}
})
.collect();
let output = cmd.output().await;
match output {
Ok(out) if out.status.success() || !out.stdout.is_empty() => {
let stdout = String::from_utf8_lossy(&out.stdout);
Ok(parse_eval_output(&stdout))
}
_ => {
// Fallback: nix eval on the legacy import
tracing::info!("nix-eval-jobs unavailable for legacy expr, using nix-instantiate");
let output = tokio::process::Command::new("nix-instantiate")
.arg(&expr_path)
.arg("--strict")
.arg("--json")
.output()
.await
.map_err(|e| CiError::NixEval(format!("nix-instantiate failed: {e}")))?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(CiError::NixEval(format!(
"nix-instantiate failed: {stderr}"
)));
}
let stdout = String::from_utf8_lossy(&output.stdout);
// nix-instantiate --json outputs the derivation path(s)
let drv_paths: Vec<String> = serde_json::from_str(&stdout).unwrap_or_default();
let jobs: Vec<NixJob> = drv_paths
.into_iter()
.enumerate()
.map(|(i, drv_path)| NixJob {
name: format!("job-{i}"),
drv_path,
system: None,
outputs: None,
input_drvs: None,
constituents: None,
})
.collect();
Ok(EvalResult {
jobs,
error_count: 0,
})
}
}
})
.await
.map_err(|_| CiError::Timeout(format!("Nix evaluation timed out after {timeout:?}")))?
Ok(EvalResult {
jobs,
error_count: 0,
})
},
}
})
.await
.map_err(|_| {
CiError::Timeout(format!("Nix evaluation timed out after {timeout:?}"))
})?
}
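// Sketch of the timeout layering used by both evaluators above: the async
// block yields Result<T>, `tokio::time::timeout` wraps that in a second
// Result, `map_err` turns the Elapsed layer into CiError::Timeout, and the
// trailing `?` peels it off so the inner Result is returned as-is.
async fn timeout_layer_sketch() -> Result<u32> {
  let layered: std::result::Result<Result<u32>, tokio::time::error::Elapsed> =
    tokio::time::timeout(Duration::from_secs(1), async { Ok(42) }).await;
  layered.map_err(|_| CiError::Timeout("example timed out".to_string()))?
}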
async fn evaluate_with_nix_eval(repo_path: &Path, nix_expression: &str) -> Result<Vec<NixJob>> {
let flake_ref = format!("{}#{}", repo_path.display(), nix_expression);
async fn evaluate_with_nix_eval(
repo_path: &Path,
nix_expression: &str,
) -> Result<Vec<NixJob>> {
let flake_ref = format!("{}#{}", repo_path.display(), nix_expression);
let output = tokio::process::Command::new("nix")
.args(["eval", "--json", &flake_ref])
let output = tokio::process::Command::new("nix")
.args(["eval", "--json", &flake_ref])
.output()
.await
.map_err(|e| CiError::NixEval(format!("Failed to run nix eval: {e}")))?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(CiError::NixEval(format!("nix eval failed: {stderr}")));
}
// Parse the JSON output - expecting an attrset of name -> derivation
let stdout = String::from_utf8_lossy(&output.stdout);
let attrs: serde_json::Value =
serde_json::from_str(&stdout).map_err(|e| {
CiError::NixEval(format!("Failed to parse nix eval output: {e}"))
})?;
let mut jobs = Vec::new();
if let serde_json::Value::Object(map) = attrs {
for (name, _value) in map {
// Get derivation path via nix derivation show
let drv_ref =
format!("{}#{}.{}", repo_path.display(), nix_expression, name);
let drv_output = tokio::process::Command::new("nix")
.args(["derivation", "show", &drv_ref])
.output()
.await
.map_err(|e| CiError::NixEval(format!("Failed to run nix eval: {e}")))?;
.map_err(|e| {
CiError::NixEval(format!("Failed to get derivation for {name}: {e}"))
})?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(CiError::NixEval(format!("nix eval failed: {stderr}")));
}
// Parse the JSON output - expecting an attrset of name -> derivation
let stdout = String::from_utf8_lossy(&output.stdout);
let attrs: serde_json::Value = serde_json::from_str(&stdout)
.map_err(|e| CiError::NixEval(format!("Failed to parse nix eval output: {e}")))?;
let mut jobs = Vec::new();
if let serde_json::Value::Object(map) = attrs {
for (name, _value) in map {
// Get derivation path via nix derivation show
let drv_ref = format!("{}#{}.{}", repo_path.display(), nix_expression, name);
let drv_output = tokio::process::Command::new("nix")
.args(["derivation", "show", &drv_ref])
.output()
.await
.map_err(|e| {
CiError::NixEval(format!("Failed to get derivation for {name}: {e}"))
})?;
if drv_output.status.success() {
let drv_stdout = String::from_utf8_lossy(&drv_output.stdout);
if let Ok(drv_json) = serde_json::from_str::<serde_json::Value>(&drv_stdout)
&& let Some((drv_path, drv_val)) =
drv_json.as_object().and_then(|o| o.iter().next())
{
let system = drv_val
.get("system")
.and_then(|v| v.as_str())
.map(|s| s.to_string());
jobs.push(NixJob {
name: name.clone(),
drv_path: drv_path.clone(),
system,
outputs: None,
input_drvs: None,
constituents: None,
});
}
}
if drv_output.status.success() {
let drv_stdout = String::from_utf8_lossy(&drv_output.stdout);
if let Ok(drv_json) =
serde_json::from_str::<serde_json::Value>(&drv_stdout)
&& let Some((drv_path, drv_val)) =
drv_json.as_object().and_then(|o| o.iter().next())
{
let system = drv_val
.get("system")
.and_then(|v| v.as_str())
.map(|s| s.to_string());
jobs.push(NixJob {
name: name.clone(),
drv_path: drv_path.clone(),
system,
outputs: None,
input_drvs: None,
constituents: None,
});
}
}
}
}
Ok(jobs)
Ok(jobs)
}


@@ -3,95 +3,95 @@
#[test]
fn test_parse_valid_job() {
  let line = r#"{"name":"hello","drvPath":"/nix/store/abc123-hello.drv","system":"x86_64-linux","outputs":{"out":"/nix/store/abc123-hello"}}"#;
  let result = fc_evaluator::nix::parse_eval_output(line);
  assert_eq!(result.jobs.len(), 1);
  assert_eq!(result.error_count, 0);
  assert_eq!(result.jobs[0].name, "hello");
  assert_eq!(result.jobs[0].drv_path, "/nix/store/abc123-hello.drv");
  assert_eq!(result.jobs[0].system.as_deref(), Some("x86_64-linux"));
}

#[test]
fn test_parse_multiple_jobs() {
  let output = r#"{"name":"hello","drvPath":"/nix/store/abc-hello.drv","system":"x86_64-linux"}
{"name":"world","drvPath":"/nix/store/def-world.drv","system":"aarch64-linux"}"#;
  let result = fc_evaluator::nix::parse_eval_output(output);
  assert_eq!(result.jobs.len(), 2);
  assert_eq!(result.error_count, 0);
  assert_eq!(result.jobs[0].name, "hello");
  assert_eq!(result.jobs[1].name, "world");
}

#[test]
fn test_parse_error_lines() {
  let output = r#"{"name":"hello","drvPath":"/nix/store/abc-hello.drv"}
{"attr":"broken","error":"attribute 'broken' missing"}
{"name":"world","drvPath":"/nix/store/def-world.drv"}"#;
  let result = fc_evaluator::nix::parse_eval_output(output);
  assert_eq!(result.jobs.len(), 2);
  assert_eq!(result.error_count, 1);
}

#[test]
fn test_parse_empty_output() {
  let result = fc_evaluator::nix::parse_eval_output("");
  assert_eq!(result.jobs.len(), 0);
  assert_eq!(result.error_count, 0);
}

#[test]
fn test_parse_blank_lines_ignored() {
  let output = "\n \n\n";
  let result = fc_evaluator::nix::parse_eval_output(output);
  assert_eq!(result.jobs.len(), 0);
  assert_eq!(result.error_count, 0);
}

#[test]
fn test_parse_malformed_json_skipped() {
  let output = "not json at all\n{invalid \
                json}\n{\"name\":\"ok\",\"drvPath\":\"/nix/store/x-ok.drv\"}";
  let result = fc_evaluator::nix::parse_eval_output(output);
  assert_eq!(result.jobs.len(), 1);
  assert_eq!(result.jobs[0].name, "ok");
}

#[test]
fn test_parse_job_with_input_drvs() {
  let line = r#"{"name":"hello","drvPath":"/nix/store/abc-hello.drv","inputDrvs":{"/nix/store/dep1.drv":["out"],"/nix/store/dep2.drv":["out"]}}"#;
  let result = fc_evaluator::nix::parse_eval_output(line);
  assert_eq!(result.jobs.len(), 1);
  let input_drvs = result.jobs[0].input_drvs.as_ref().unwrap();
  assert_eq!(input_drvs.len(), 2);
}

#[test]
fn test_parse_job_with_constituents() {
  let line = r#"{"name":"aggregate","drvPath":"/nix/store/abc-aggregate.drv","constituents":["hello","world"]}"#;
  let result = fc_evaluator::nix::parse_eval_output(line);
  assert_eq!(result.jobs.len(), 1);
  let constituents = result.jobs[0].constituents.as_ref().unwrap();
  assert_eq!(constituents.len(), 2);
  assert_eq!(constituents[0], "hello");
  assert_eq!(constituents[1], "world");
}

#[test]
fn test_parse_error_without_name() {
  let line = r#"{"error":"some eval error"}"#;
  let result = fc_evaluator::nix::parse_eval_output(line);
  assert_eq!(result.jobs.len(), 0);
  assert_eq!(result.error_count, 1);
}

// --- Inputs hash computation ---

#[test]
fn test_inputs_hash_deterministic() {
  // The compute_inputs_hash function is in eval_loop which is not easily
  // testable as a standalone function since it's not public. We test the nix
  // parsing above and trust the hash logic is correct since it uses sha2.
}
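
// Editorial sketch (names, layout, and the `hex` dependency are assumptions,
// not the actual compute_inputs_hash implementation): one way such a hash
// stays deterministic with sha2 - sort the inputs, then feed them to the
// hasher with a separator.
#[allow(dead_code)]
fn inputs_hash_sketch(mut inputs: Vec<String>) -> String {
  use sha2::{Digest, Sha256};
  inputs.sort(); // order-independent: the same inputs always hash the same
  let mut hasher = Sha256::new();
  for input in &inputs {
    hasher.update(input.as_bytes());
    hasher.update(b"\0"); // separator so ["ab", "c"] != ["a", "bc"]
  }
  hex::encode(hasher.finalize())
}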

@@ -6,85 +6,100 @@ use tempfile::TempDir;
#[test]
fn test_clone_or_fetch_clones_new_repo() {
  let upstream_dir = TempDir::new().unwrap();
  let work_dir = TempDir::new().unwrap();

  // Create a non-bare repo to clone from (bare repos have no HEAD by default)
  let upstream = Repository::init(upstream_dir.path()).unwrap();

  // Create initial commit
  {
    let sig = Signature::now("Test", "test@example.com").unwrap();
    let tree_id = upstream.index().unwrap().write_tree().unwrap();
    let tree = upstream.find_tree(tree_id).unwrap();
    upstream
      .commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
      .unwrap();
  }

  let url = format!("file://{}", upstream_dir.path().display());
  let result = fc_evaluator::git::clone_or_fetch(
    &url,
    work_dir.path(),
    "test-project",
    None,
  );

  assert!(
    result.is_ok(),
    "clone_or_fetch should succeed: {:?}",
    result.err()
  );
  let (repo_path, hash): (std::path::PathBuf, String) = result.unwrap();
  assert!(repo_path.exists());
  assert!(!hash.is_empty());
  assert_eq!(hash.len(), 40); // full SHA-1
}

#[test]
fn test_clone_or_fetch_fetches_existing() {
  let upstream_dir = TempDir::new().unwrap();
  let work_dir = TempDir::new().unwrap();

  let upstream = Repository::init(upstream_dir.path()).unwrap();
  {
    let sig = Signature::now("Test", "test@example.com").unwrap();
    let tree_id = upstream.index().unwrap().write_tree().unwrap();
    let tree = upstream.find_tree(tree_id).unwrap();
    upstream
      .commit(Some("HEAD"), &sig, &sig, "initial", &tree, &[])
      .unwrap();
  }

  let url = format!("file://{}", upstream_dir.path().display());

  // First clone
  let (_, hash1): (std::path::PathBuf, String) =
    fc_evaluator::git::clone_or_fetch(
      &url,
      work_dir.path(),
      "test-project",
      None,
    )
    .expect("first clone failed");

  // Make another commit upstream
  {
    let sig = Signature::now("Test", "test@example.com").unwrap();
    let tree_id = upstream.index().unwrap().write_tree().unwrap();
    let tree = upstream.find_tree(tree_id).unwrap();
    let head = upstream.head().unwrap().peel_to_commit().unwrap();
    upstream
      .commit(Some("HEAD"), &sig, &sig, "second", &tree, &[&head])
      .unwrap();
  }

  // Second fetch
  let (_, hash2): (std::path::PathBuf, String) =
    fc_evaluator::git::clone_or_fetch(
      &url,
      work_dir.path(),
      "test-project",
      None,
    )
    .expect("second fetch failed");

  assert!(!hash1.is_empty());
  assert!(!hash2.is_empty());
}

#[test]
fn test_clone_invalid_url_returns_error() {
  let work_dir = TempDir::new().unwrap();
  let result = fc_evaluator::git::clone_or_fetch(
    "file:///nonexistent/repo",
    work_dir.path(),
    "bad-proj",
    None,
  );
  assert!(result.is_err());
}

@@ -1,9 +1,9 @@
[package]
name = "fc-migrate-cli"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[[bin]]
@@ -11,8 +11,8 @@ name = "fc-migrate"
path = "src/main.rs"
[dependencies]
anyhow.workspace = true
clap.workspace = true
fc-common = { path = "../common" }
tokio.workspace = true
tracing-subscriber.workspace = true

@@ -4,5 +4,5 @@ use fc_common::migrate_cli::run;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
  run().await
}

@@ -1,25 +1,25 @@
[package]
name = "fc-queue-runner"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
anyhow.workspace = true
chrono.workspace = true
clap.workspace = true
config.workspace = true
serde.workspace = true
serde_json.workspace = true
sqlx.workspace = true
thiserror.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
uuid.workspace = true

# Our crates
fc-common.workspace = true

@@ -1,301 +1,306 @@
use std::{path::Path, time::Duration};

use fc_common::{CiError, error::Result};
use tokio::io::{AsyncBufReadExt, BufReader};
const MAX_LOG_SIZE: usize = 100 * 1024 * 1024; // 100MB
/// Run a build on a remote machine via `nix build --store ssh://...`.
#[tracing::instrument(
  skip(work_dir, live_log_path),
  fields(drv_path, store_uri)
)]
pub async fn run_nix_build_remote(
  drv_path: &str,
  work_dir: &Path,
  timeout: Duration,
  store_uri: &str,
  ssh_key_file: Option<&str>,
  live_log_path: Option<&Path>,
) -> Result<BuildResult> {
  let result = tokio::time::timeout(timeout, async {
    let mut cmd = tokio::process::Command::new("nix");
    cmd
      .args([
        "build",
        "--no-link",
        "--print-out-paths",
        "--log-format",
        "internal-json",
        "--option",
        "sandbox",
        "true",
        "--max-build-log-size",
        "104857600",
        "--store",
        store_uri,
        drv_path,
      ])
      .current_dir(work_dir)
      .kill_on_drop(true)
      .stdout(std::process::Stdio::piped())
      .stderr(std::process::Stdio::piped());

    if let Some(key_file) = ssh_key_file {
      cmd.env(
        "NIX_SSHOPTS",
        format!("-i {key_file} -o StrictHostKeyChecking=accept-new"),
      );
    }

    let mut child = cmd.spawn().map_err(|e| {
      CiError::Build(format!("Failed to run remote nix build: {e}"))
    })?;

    let stdout_handle = child.stdout.take();
    let stderr_handle = child.stderr.take();

    let stdout_task = tokio::spawn(async move {
      let mut buf = String::new();
      if let Some(stdout) = stdout_handle {
        let mut reader = BufReader::new(stdout);
        let mut line = String::new();
        while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
          buf.push_str(&line);
          line.clear();
        }
      }
      buf
    });

    let live_log_path_owned = live_log_path.map(|p| p.to_path_buf());
    let stderr_task = tokio::spawn(async move {
      let mut buf = String::new();
      let steps: Vec<SubStep> = Vec::new();
      let mut log_file = if let Some(ref path) = live_log_path_owned {
        tokio::fs::File::create(path).await.ok()
      } else {
        None
      };
      if let Some(stderr) = stderr_handle {
        let mut reader = BufReader::new(stderr);
        let mut line = String::new();
        while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
          if let Some(ref mut f) = log_file {
            use tokio::io::AsyncWriteExt;
            let _ = f.write_all(line.as_bytes()).await;
            let _ = f.flush().await;
          }
          if buf.len() < MAX_LOG_SIZE {
            buf.push_str(&line);
          }
          line.clear();
        }
      }
      (buf, steps)
    });

    let stdout_buf = stdout_task.await.unwrap_or_default();
    let (stderr_buf, sub_steps) = stderr_task.await.unwrap_or_default();

    let status = child.wait().await.map_err(|e| {
      CiError::Build(format!("Failed to wait for remote nix build: {e}"))
    })?;

    let output_paths: Vec<String> = stdout_buf
      .lines()
      .map(|s| s.trim().to_string())
      .filter(|s| !s.is_empty())
      .collect();

    Ok::<_, CiError>(BuildResult {
      success: status.success(),
      stdout: stdout_buf,
      stderr: stderr_buf,
      output_paths,
      sub_steps,
    })
  })
  .await;

  match result {
    Ok(inner) => inner,
    Err(_) => {
      Err(CiError::Timeout(format!(
        "Remote build timed out after {timeout:?}"
      )))
    },
  }
}
pub struct BuildResult {
  pub success: bool,
  pub stdout: String,
  pub stderr: String,
  pub output_paths: Vec<String>,
  pub sub_steps: Vec<SubStep>,
}
/// A sub-step parsed from nix's internal JSON log format.
pub struct SubStep {
  pub drv_path: String,
  pub completed_at: Option<chrono::DateTime<chrono::Utc>>,
  pub success: bool,
}
/// Parse a single nix internal JSON log line (`@nix {...}`).
/// Returns `Some(action, drv_path)` if the line contains a derivation action.
pub fn parse_nix_log_line(line: &str) -> Option<(&'static str, String)> {
  let json_str = line.strip_prefix("@nix ")?.trim();
  let parsed: serde_json::Value = serde_json::from_str(json_str).ok()?;
  let action = parsed.get("action")?.as_str()?;
  let drv = parsed.get("derivation")?.as_str()?.to_string();

  match action {
    "start" => Some(("start", drv)),
    "stop" => Some(("stop", drv)),
    _ => None,
  }
}
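
// Editorial usage sketch: how a caller sees the function above. The store
// path is a hypothetical sample value.
#[test]
fn parse_nix_log_line_sketch() {
  let line =
    r#"@nix {"action":"start","derivation":"/nix/store/xyz-demo.drv"}"#;
  assert_eq!(
    parse_nix_log_line(line),
    Some(("start", "/nix/store/xyz-demo.drv".to_string()))
  );
}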
/// Run `nix build` for a derivation path.
/// If `live_log_path` is provided, build output is streamed to that file
/// incrementally.
#[tracing::instrument(skip(work_dir, live_log_path), fields(drv_path))]
pub async fn run_nix_build(
  drv_path: &str,
  work_dir: &Path,
  timeout: Duration,
  live_log_path: Option<&Path>,
) -> Result<BuildResult> {
  let result = tokio::time::timeout(timeout, async {
    let mut child = tokio::process::Command::new("nix")
      .args([
        "build",
        "--no-link",
        "--print-out-paths",
        "--log-format",
        "internal-json",
        "--option",
        "sandbox",
        "true",
        "--max-build-log-size",
        "104857600",
        drv_path,
      ])
      .current_dir(work_dir)
      .kill_on_drop(true)
      .stdout(std::process::Stdio::piped())
      .stderr(std::process::Stdio::piped())
      .spawn()
      .map_err(|e| CiError::Build(format!("Failed to run nix build: {e}")))?;

    let stdout_handle = child.stdout.take();
    let stderr_handle = child.stderr.take();

    // Read stdout (output paths)
    let stdout_task = tokio::spawn(async move {
      let mut buf = String::new();
      if let Some(stdout) = stdout_handle {
        let mut reader = BufReader::new(stdout);
        let mut line = String::new();
        while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
          buf.push_str(&line);
          line.clear();
        }
      }
      buf
    });

    // Read stderr (logs + internal JSON)
    let live_log_path_owned = live_log_path.map(|p| p.to_path_buf());
    let stderr_task = tokio::spawn(async move {
      let mut buf = String::new();
      let mut steps: Vec<SubStep> = Vec::new();
      let mut log_file = if let Some(ref path) = live_log_path_owned {
        tokio::fs::File::create(path).await.ok()
      } else {
        None
      };
      if let Some(stderr) = stderr_handle {
        let mut reader = BufReader::new(stderr);
        let mut line = String::new();
        while reader.read_line(&mut line).await.unwrap_or(0) > 0 {
          // Write to live log file if available
          if let Some(ref mut f) = log_file {
            use tokio::io::AsyncWriteExt;
            let _ = f.write_all(line.as_bytes()).await;
            let _ = f.flush().await;
          }
          // Parse nix internal JSON log lines
          if line.starts_with("@nix ")
            && let Some(json_str) = line.strip_prefix("@nix ")
            && let Ok(parsed) =
              serde_json::from_str::<serde_json::Value>(json_str.trim())
            && let Some(action) = parsed.get("action").and_then(|a| a.as_str())
          {
            match action {
              "start" => {
                if let Some(drv) =
                  parsed.get("derivation").and_then(|d| d.as_str())
                {
                  steps.push(SubStep {
                    drv_path: drv.to_string(),
                    completed_at: None,
                    success: false,
                  });
                }
              },
              "stop" => {
                if let Some(drv) =
                  parsed.get("derivation").and_then(|d| d.as_str())
                  && let Some(step) =
                    steps.iter_mut().rfind(|s| s.drv_path == drv)
                {
                  step.completed_at = Some(chrono::Utc::now());
                  step.success = true;
                }
              },
              _ => {},
            }
          }
          if buf.len() < MAX_LOG_SIZE {
            buf.push_str(&line);
          }
          line.clear();
        }
      }
      (buf, steps)
    });

    let stdout_buf = stdout_task.await.unwrap_or_default();
    let (stderr_buf, sub_steps) = stderr_task.await.unwrap_or_default();

    let status = child.wait().await.map_err(|e| {
      CiError::Build(format!("Failed to wait for nix build: {e}"))
    })?;

    let output_paths: Vec<String> = stdout_buf
      .lines()
      .map(|s| s.trim().to_string())
      .filter(|s| !s.is_empty())
      .collect();

    Ok::<_, CiError>(BuildResult {
      success: status.success(),
      stdout: stdout_buf,
      stderr: stderr_buf,
      output_paths,
      sub_steps,
    })
  })
  .await;

  match result {
    Ok(inner) => inner,
    Err(_) => {
      Err(CiError::Timeout(format!(
        "Build timed out after {timeout:?}"
      )))
    },
  }
}
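
// Editorial caller sketch (drv path, work dir, and timeout are hypothetical,
// and the alias `Result<()>` is assumed to default its error type):
#[allow(dead_code)]
async fn build_one_sketch() -> Result<()> {
  let result = run_nix_build(
    "/nix/store/abc123-hello.drv",
    Path::new("/tmp/fc-work"),
    Duration::from_secs(3600),
    None, // no live log streaming
  )
  .await?;
  if result.success {
    println!("built: {:?}", result.output_paths);
  }
  Ok(())
}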

@@ -1,162 +1,161 @@
use std::{sync::Arc, time::Duration};

use clap::Parser;
use fc_common::{
  config::{Config, GcConfig},
  database::Database,
  gc_roots,
};
use fc_queue_runner::worker::WorkerPool;
#[derive(Parser)]
#[command(name = "fc-queue-runner")]
#[command(about = "CI Queue Runner - Build dispatch and execution")]
struct Cli {
  #[arg(short, long)]
  workers: Option<usize>,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
  let cli = Cli::parse();

  let config = Config::load()?;
  fc_common::init_tracing(&config.tracing);

  tracing::info!("Starting CI Queue Runner");

  let log_config = config.logs;
  let gc_config = config.gc;
  let gc_config_for_loop = gc_config.clone();
  let notifications_config = config.notifications;
  let signing_config = config.signing;
  let cache_upload_config = config.cache_upload;
  let qr_config = config.queue_runner;

  let workers = cli.workers.unwrap_or(qr_config.workers);
  let poll_interval = Duration::from_secs(qr_config.poll_interval);
  let build_timeout = Duration::from_secs(qr_config.build_timeout);
  let work_dir = qr_config.work_dir;

  // Ensure the work directory exists
  tokio::fs::create_dir_all(&work_dir).await?;

  // Clean up orphaned active logs from previous crashes
  cleanup_stale_logs(&log_config.log_dir).await;

  let db = Database::new(config.database).await?;

  let worker_pool = Arc::new(WorkerPool::new(
    db.pool().clone(),
    workers,
    work_dir.clone(),
    build_timeout,
    log_config,
    gc_config,
    notifications_config,
    signing_config,
    cache_upload_config,
  ));

  tracing::info!(
    workers = workers,
    poll_interval = ?poll_interval,
    build_timeout = ?build_timeout,
    work_dir = %work_dir.display(),
    "Queue runner configured"
  );

  let worker_pool_for_drain = worker_pool.clone();

  tokio::select! {
    result = fc_queue_runner::runner_loop::run(db.pool().clone(), worker_pool, poll_interval) => {
      if let Err(e) = result {
        tracing::error!("Runner loop failed: {e}");
      }
    }
    () = gc_loop(gc_config_for_loop) => {}
    () = shutdown_signal() => {
      tracing::info!("Shutdown signal received, draining in-flight builds...");
      worker_pool_for_drain.drain();
      worker_pool_for_drain.wait_for_drain().await;
      tracing::info!("All in-flight builds completed");
    }
  }

  tracing::info!("Queue runner shutting down, closing database pool");
  db.close().await;

  Ok(())
}
async fn cleanup_stale_logs(log_dir: &std::path::Path) {
  if let Ok(mut entries) = tokio::fs::read_dir(log_dir).await {
    while let Ok(Some(entry)) = entries.next_entry().await {
      if entry.file_name().to_string_lossy().ends_with(".active.log") {
        let _ = tokio::fs::remove_file(entry.path()).await;
        tracing::info!("Removed stale active log: {}", entry.path().display());
      }
    }
  }
}
async fn gc_loop(gc_config: GcConfig) {
  if !gc_config.enabled {
    return std::future::pending().await;
  }
  let interval = std::time::Duration::from_secs(gc_config.cleanup_interval);
  let max_age = std::time::Duration::from_secs(gc_config.max_age_days * 86400);

  loop {
    tokio::time::sleep(interval).await;
    match gc_roots::cleanup_old_roots(&gc_config.gc_roots_dir, max_age) {
      Ok(count) if count > 0 => {
        tracing::info!(count, "Cleaned up old GC roots");
        // Optionally run nix-collect-garbage
        match tokio::process::Command::new("nix-collect-garbage")
          .output()
          .await
        {
          Ok(output) if output.status.success() => {
            tracing::info!("nix-collect-garbage completed");
          },
          Ok(output) => {
            let stderr = String::from_utf8_lossy(&output.stderr);
            tracing::warn!("nix-collect-garbage failed: {stderr}");
          },
          Err(e) => {
            tracing::warn!("Failed to run nix-collect-garbage: {e}");
          },
        }
      },
      Ok(_) => {},
      Err(e) => {
        tracing::error!("GC cleanup failed: {e}");
      },
    }
  }
}
async fn shutdown_signal() {
  let ctrl_c = async {
    tokio::signal::ctrl_c()
      .await
      .expect("failed to install Ctrl+C handler");
  };

  #[cfg(unix)]
  let terminate = async {
    tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
      .expect("failed to install SIGTERM handler")
      .recv()
      .await;
  };

  #[cfg(not(unix))]
  let terminate = std::future::pending::<()>();

  tokio::select! {
    () = ctrl_c => {},
    () = terminate => {},
  }
}

View file

@ -1,125 +1,129 @@
use std::{sync::Arc, time::Duration};

use fc_common::{models::BuildStatus, repo};
use sqlx::PgPool;

use crate::worker::WorkerPool;
pub async fn run(
  pool: PgPool,
  worker_pool: Arc<WorkerPool>,
  poll_interval: Duration,
) -> anyhow::Result<()> {
  // Reset orphaned builds from previous crashes (older than 5 minutes)
  match repo::builds::reset_orphaned(&pool, 300).await {
    Ok(count) if count > 0 => {
      tracing::warn!(count, "Reset orphaned builds back to pending");
    },
    Ok(_) => {},
    Err(e) => {
      tracing::error!("Failed to reset orphaned builds: {e}");
    },
  }

  loop {
    match repo::builds::list_pending(&pool, 10).await {
      Ok(builds) => {
        if !builds.is_empty() {
          tracing::info!("Found {} pending builds", builds.len());
        }

        for build in builds {
          // Aggregate builds: check if all constituents are done
          if build.is_aggregate {
            match repo::build_dependencies::all_deps_completed(&pool, build.id)
              .await
            {
              Ok(true) => {
                // All constituents done — mark aggregate as completed
                tracing::info!(
                  build_id = %build.id,
                  job = %build.job_name,
                  "Aggregate build: all constituents completed"
                );
                let _ = repo::builds::start(&pool, build.id).await;
                let _ = repo::builds::complete(
                  &pool,
                  build.id,
                  BuildStatus::Completed,
                  None,
                  None,
                  None,
                )
                .await;
                continue;
              },
              Ok(false) => {
                tracing::debug!(
                  build_id = %build.id,
                  "Aggregate build waiting for constituents"
                );
                continue;
              },
              Err(e) => {
                tracing::error!(
                  build_id = %build.id,
                  "Failed to check aggregate deps: {e}"
                );
                continue;
              },
            }
          }

          // Derivation deduplication: reuse result if same drv was already
          // built
          match repo::builds::get_completed_by_drv_path(&pool, &build.drv_path)
            .await
          {
            Ok(Some(existing)) if existing.id != build.id => {
              tracing::info!(
                build_id = %build.id,
                existing_id = %existing.id,
                drv = %build.drv_path,
                "Dedup: reusing result from existing build"
              );
              let _ = repo::builds::start(&pool, build.id).await;
              let _ = repo::builds::complete(
                &pool,
                build.id,
                BuildStatus::Completed,
                existing.log_path.as_deref(),
                existing.build_output_path.as_deref(),
                None,
              )
              .await;
              continue;
            },
            _ => {},
          }

          // Dependency-aware scheduling: skip if deps not met
          match repo::build_dependencies::all_deps_completed(&pool, build.id)
            .await
          {
            Ok(true) => {},
            Ok(false) => {
              tracing::debug!(
                build_id = %build.id,
                "Build waiting for dependencies"
              );
              continue;
            },
            Err(e) => {
              tracing::error!(
                build_id = %build.id,
                "Failed to check build deps: {e}"
              );
              continue;
            },
          }

          worker_pool.dispatch(build);
        }
      },
      Err(e) => {
        tracing::error!("Failed to fetch pending builds: {e}");
      },
    }
    tokio::time::sleep(poll_interval).await;
  }
}

File diff suppressed because it is too large.

@@ -6,285 +6,284 @@
#[test]
fn test_parse_nix_log_start() {
  let line =
    r#"@nix {"action":"start","derivation":"/nix/store/abc-hello.drv"}"#;
  let result = fc_queue_runner::builder::parse_nix_log_line(line);
  assert!(result.is_some());
  let (action, drv) = result.unwrap();
  assert_eq!(action, "start");
  assert_eq!(drv, "/nix/store/abc-hello.drv");
}

#[test]
fn test_parse_nix_log_stop() {
  let line =
    r#"@nix {"action":"stop","derivation":"/nix/store/abc-hello.drv"}"#;
  let result = fc_queue_runner::builder::parse_nix_log_line(line);
  assert!(result.is_some());
  let (action, drv) = result.unwrap();
  assert_eq!(action, "stop");
  assert_eq!(drv, "/nix/store/abc-hello.drv");
}

#[test]
fn test_parse_nix_log_unknown_action() {
  let line = r#"@nix {"action":"msg","msg":"building..."}"#;
  let result = fc_queue_runner::builder::parse_nix_log_line(line);
  assert!(result.is_none());
}

#[test]
fn test_parse_nix_log_not_nix_prefix() {
  let line = "building '/nix/store/abc-hello.drv'...";
  let result = fc_queue_runner::builder::parse_nix_log_line(line);
  assert!(result.is_none());
}

#[test]
fn test_parse_nix_log_invalid_json() {
  let line = "@nix {invalid json}";
  let result = fc_queue_runner::builder::parse_nix_log_line(line);
  assert!(result.is_none());
}

#[test]
fn test_parse_nix_log_no_derivation_field() {
  let line = r#"@nix {"action":"start","type":"build"}"#;
  let result = fc_queue_runner::builder::parse_nix_log_line(line);
  assert!(result.is_none());
}

#[test]
fn test_parse_nix_log_empty_line() {
  let result = fc_queue_runner::builder::parse_nix_log_line("");
  assert!(result.is_none());
}

// --- WorkerPool drain ---

#[tokio::test]
async fn test_worker_pool_drain_stops_dispatch() {
  // Create a minimal worker pool
  let url = match std::env::var("TEST_DATABASE_URL") {
    Ok(url) => url,
    Err(_) => {
      println!("Skipping: TEST_DATABASE_URL not set");
      return;
    },
  };

  let pool = sqlx::postgres::PgPoolOptions::new()
    .max_connections(1)
    .connect(&url)
    .await
    .expect("failed to connect");

  let worker_pool = fc_queue_runner::worker::WorkerPool::new(
    pool,
    2,
    std::env::temp_dir(),
    std::time::Duration::from_secs(60),
    fc_common::config::LogConfig::default(),
    fc_common::config::GcConfig::default(),
    fc_common::config::NotificationsConfig::default(),
    fc_common::config::SigningConfig::default(),
    fc_common::config::CacheUploadConfig::default(),
  );

  // Drain should not panic
  worker_pool.drain();

  // After drain, dispatching should be a no-op (build won't start)
  // We can't easily test this without a real build, but at least verify drain
  // doesn't crash
}

// --- Database-dependent tests ---

#[tokio::test]
async fn test_atomic_build_claiming() {
  let url = match std::env::var("TEST_DATABASE_URL") {
    Ok(url) => url,
    Err(_) => {
      println!("Skipping: TEST_DATABASE_URL not set");
      return;
    },
  };

  let pool = sqlx::postgres::PgPoolOptions::new()
    .max_connections(5)
    .connect(&url)
    .await
    .expect("failed to connect");

  sqlx::migrate!("../common/migrations")
    .run(&pool)
    .await
    .expect("migration failed");

  // Create a project -> jobset -> evaluation -> build chain
  let project = fc_common::repo::projects::create(
    &pool,
    fc_common::models::CreateProject {
      name: format!("runner-test-{}", uuid::Uuid::new_v4()),
      description: None,
      repository_url: "https://github.com/test/repo".to_string(),
    },
  )
  .await
  .expect("create project");

  let jobset =
    fc_common::repo::jobsets::create(&pool, fc_common::models::CreateJobset {
      project_id: project.id,
      name: "main".to_string(),
      nix_expression: "packages".to_string(),
      enabled: None,
      flake_mode: None,
      check_interval: None,
      branch: None,
      scheduling_shares: None,
    })
    .await
    .expect("create jobset");

  let eval = fc_common::repo::evaluations::create(
    &pool,
    fc_common::models::CreateEvaluation {
      jobset_id: jobset.id,
      commit_hash: "abcdef1234567890abcdef1234567890abcdef12".to_string(),
    },
  )
  .await
  .expect("create eval");

  let build =
    fc_common::repo::builds::create(&pool, fc_common::models::CreateBuild {
      evaluation_id: eval.id,
      job_name: "test-build".to_string(),
      drv_path: "/nix/store/test-runner-test.drv".to_string(),
      system: Some("x86_64-linux".to_string()),
      outputs: None,
      is_aggregate: None,
      constituents: None,
    })
    .await
    .expect("create build");

  assert_eq!(build.status, fc_common::models::BuildStatus::Pending);

  // First claim should succeed
  let claimed = fc_common::repo::builds::start(&pool, build.id)
    .await
    .expect("start build");
  assert!(claimed.is_some());

  // Second claim should return None (already claimed)
  let claimed2 = fc_common::repo::builds::start(&pool, build.id)
    .await
    .expect("start build again");
  assert!(claimed2.is_none());

  // Clean up
  let _ = fc_common::repo::projects::delete(&pool, project.id).await;
}

#[tokio::test]
async fn test_orphan_build_reset() {
  let url = match std::env::var("TEST_DATABASE_URL") {
    Ok(url) => url,
    Err(_) => {
      println!("Skipping: TEST_DATABASE_URL not set");
      return;
    },
  };

  let pool = sqlx::postgres::PgPoolOptions::new()
    .max_connections(5)
    .connect(&url)
    .await
    .expect("failed to connect");

  sqlx::migrate!("../common/migrations")
    .run(&pool)
    .await
    .expect("migration failed");

  let project = fc_common::repo::projects::create(
    &pool,
    fc_common::models::CreateProject {
      name: format!("orphan-test-{}", uuid::Uuid::new_v4()),
      description: None,
      repository_url: "https://github.com/test/repo".to_string(),
    },
  )
  .await
  .expect("create project");

  let jobset =
    fc_common::repo::jobsets::create(&pool, fc_common::models::CreateJobset {
      project_id: project.id,
      name: "main".to_string(),
      nix_expression: "packages".to_string(),
      enabled: None,
      flake_mode: None,
      check_interval: None,
      branch: None,
      scheduling_shares: None,
    })
    .await
    .expect("create jobset");

  let eval = fc_common::repo::evaluations::create(
    &pool,
    fc_common::models::CreateEvaluation {
      jobset_id: jobset.id,
      commit_hash: "1234567890abcdef1234567890abcdef12345678".to_string(),
    },
  )
  .await
  .expect("create eval");

  // Create a build and mark it running
  let build =
    fc_common::repo::builds::create(&pool, fc_common::models::CreateBuild {
      evaluation_id: eval.id,
      job_name: "orphan-build".to_string(),
      drv_path: "/nix/store/test-orphan.drv".to_string(),
      system: None,
      outputs: None,
      is_aggregate: None,
      constituents: None,
    })
    .await
    .expect("create build");

  let _ = fc_common::repo::builds::start(&pool, build.id).await;

  // Simulate the build being stuck for a while by manually backdating
  // started_at
  sqlx::query(
    "UPDATE builds SET started_at = NOW() - INTERVAL '10 minutes' WHERE id = \
     $1",
  )
  .bind(build.id)
  .execute(&pool)
  .await
  .expect("backdate build");

  // Reset orphaned builds (older than 5 minutes)
  let count = fc_common::repo::builds::reset_orphaned(&pool, 300)
    .await
    .expect("reset orphaned");
  assert!(count >= 1, "should have reset at least 1 orphaned build");

  // Verify build is pending again
  let reset_build = fc_common::repo::builds::get(&pool, build.id)
    .await
    .expect("get build");
  assert_eq!(reset_build.status, fc_common::models::BuildStatus::Pending);

  // Clean up
  let _ = fc_common::repo::projects::delete(&pool, project.id).await;
}

@@ -1,37 +1,37 @@
[package]
name = "fc-server"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
anyhow.workspace = true
askama.workspace = true
askama_axum.workspace = true
async-stream.workspace = true
axum.workspace = true
axum-extra.workspace = true
chrono.workspace = true
clap.workspace = true
config.workspace = true
dashmap.workspace = true
futures.workspace = true
hex.workspace = true
hmac.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
sqlx.workspace = true
thiserror.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tower.workspace = true
tower-http.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
uuid.workspace = true

# Our crates
fc-common.workspace = true

@@ -1,76 +1,79 @@
use axum::{
extract::{FromRequestParts, Request, State},
http::{StatusCode, request::Parts},
middleware::Next,
response::Response,
extract::{FromRequestParts, Request, State},
http::{StatusCode, request::Parts},
middleware::Next,
response::Response,
};
use fc_common::models::ApiKey;
use sha2::{Digest, Sha256};
use crate::state::AppState;
/// Extract and validate an API key from the Authorization header or session cookie.
/// Keys use the format: `Bearer fc_xxxx`. Session cookies use `fc_session=<id>`.
/// Write endpoints (POST/PUT/DELETE/PATCH) require a valid key.
/// Read endpoints (GET/HEAD/OPTIONS) try to extract optionally (for dashboard admin UI).
/// Extract and validate an API key from the Authorization header or session
/// cookie. Keys use the format: `Bearer fc_xxxx`. Session cookies use
/// `fc_session=<id>`. Write endpoints (POST/PUT/DELETE/PATCH) require a valid
/// key. Read endpoints (GET/HEAD/OPTIONS) try to extract optionally (for
/// dashboard admin UI).
pub async fn require_api_key(
State(state): State<AppState>,
mut request: Request,
next: Next,
State(state): State<AppState>,
mut request: Request,
next: Next,
) -> Result<Response, StatusCode> {
let method = request.method().clone();
let is_read = method == axum::http::Method::GET
|| method == axum::http::Method::HEAD
|| method == axum::http::Method::OPTIONS;
let method = request.method().clone();
let is_read = method == axum::http::Method::GET
|| method == axum::http::Method::HEAD
|| method == axum::http::Method::OPTIONS;
let auth_header = request
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.map(String::from);
let auth_header = request
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.map(String::from);
let token = auth_header
.as_deref()
.and_then(|h| h.strip_prefix("Bearer "));
let token = auth_header
.as_deref()
.and_then(|h| h.strip_prefix("Bearer "));
// Try Bearer token first
if let Some(token) = token {
let mut hasher = Sha256::new();
hasher.update(token.as_bytes());
let key_hash = hex::encode(hasher.finalize());
// Try Bearer token first
if let Some(token) = token {
let mut hasher = Sha256::new();
hasher.update(token.as_bytes());
let key_hash = hex::encode(hasher.finalize());
if let Ok(Some(api_key)) =
fc_common::repo::api_keys::get_by_hash(&state.pool, &key_hash).await
{
let pool = state.pool.clone();
let key_id = api_key.id;
tokio::spawn(async move {
let _ = fc_common::repo::api_keys::touch_last_used(&pool, key_id).await;
});
if let Ok(Some(api_key)) =
fc_common::repo::api_keys::get_by_hash(&state.pool, &key_hash).await
{
let pool = state.pool.clone();
let key_id = api_key.id;
tokio::spawn(async move {
let _ = fc_common::repo::api_keys::touch_last_used(&pool, key_id).await;
});
request.extensions_mut().insert(api_key);
return Ok(next.run(request).await);
}
request.extensions_mut().insert(api_key);
return Ok(next.run(request).await);
}
}
// Fall back to session cookie (so dashboard JS fetches work)
if let Some(cookie_header) = request
.headers()
.get("cookie")
.and_then(|v| v.to_str().ok())
&& let Some(session_id) = parse_cookie(cookie_header, "fc_session")
&& let Some(session) = state.sessions.get(&session_id)
&& session.created_at.elapsed() < std::time::Duration::from_secs(24 * 60 * 60) {
request.extensions_mut().insert(session.api_key.clone());
return Ok(next.run(request).await);
}
// Fall back to session cookie (so dashboard JS fetches work)
if let Some(cookie_header) = request
.headers()
.get("cookie")
.and_then(|v| v.to_str().ok())
&& let Some(session_id) = parse_cookie(cookie_header, "fc_session")
&& let Some(session) = state.sessions.get(&session_id)
&& session.created_at.elapsed()
< std::time::Duration::from_secs(24 * 60 * 60)
{
request.extensions_mut().insert(session.api_key.clone());
return Ok(next.run(request).await);
}
// No valid auth found
if is_read {
Ok(next.run(request).await)
} else {
Err(StatusCode::UNAUTHORIZED)
}
// No valid auth found
if is_read {
Ok(next.run(request).await)
} else {
Err(StatusCode::UNAUTHORIZED)
}
}
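
A minimal sketch of how this middleware could be attached to a router via
`axum::middleware::from_fn_with_state`; the route and handler below are
illustrative, not part of this commit:

// Sketch only: assumes `AppState` and `require_api_key` are in scope.
use axum::{Router, middleware, routing::get};

// Hypothetical handler standing in for any protected endpoint.
async fn list_projects() -> &'static str {
  "ok"
}

fn app(state: AppState) -> Router {
  Router::new()
    .route("/projects", get(list_projects))
    .layer(middleware::from_fn_with_state(state.clone(), require_api_key))
    .with_state(state)
}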
/// Extractor that requires an authenticated admin user.
@@ -78,84 +81,88 @@ pub async fn require_api_key(
pub struct RequireAdmin(pub ApiKey);
impl FromRequestParts<AppState> for RequireAdmin {
type Rejection = StatusCode;
type Rejection = StatusCode;
async fn from_request_parts(
parts: &mut Parts,
_state: &AppState,
) -> Result<Self, Self::Rejection> {
let key = parts
.extensions
.get::<ApiKey>()
.cloned()
.ok_or(StatusCode::UNAUTHORIZED)?;
if key.role == "admin" {
Ok(RequireAdmin(key))
} else {
Err(StatusCode::FORBIDDEN)
}
async fn from_request_parts(
parts: &mut Parts,
_state: &AppState,
) -> Result<Self, Self::Rejection> {
let key = parts
.extensions
.get::<ApiKey>()
.cloned()
.ok_or(StatusCode::UNAUTHORIZED)?;
if key.role == "admin" {
Ok(RequireAdmin(key))
} else {
Err(StatusCode::FORBIDDEN)
}
}
}
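
For reference, a handler only needs to name the extractor to be gated; a
hypothetical example (rejections surface as 401/403 per the impl above):

async fn admin_only(RequireAdmin(key): RequireAdmin) -> String {
  // Reaching this point implies a valid key with the admin role.
  format!("authorized with role {}", key.role)
}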
/// Extractor that requires one of the specified roles (admin always passes).
/// Intended usage: `_auth: RequireRoles<"cancel-build", "restart-jobs">`
///
/// Since const generics with strings aren't stable, use the helper function instead.
/// Since const generics with strings aren't stable, use the helper function
/// instead.
pub struct RequireRoles(pub ApiKey);
impl RequireRoles {
pub fn check(
extensions: &axum::http::Extensions,
allowed: &[&str],
) -> Result<ApiKey, StatusCode> {
let key = extensions
.get::<ApiKey>()
.cloned()
.ok_or(StatusCode::UNAUTHORIZED)?;
if key.role == "admin" || allowed.contains(&key.role.as_str()) {
Ok(key)
} else {
Err(StatusCode::FORBIDDEN)
}
pub fn check(
extensions: &axum::http::Extensions,
allowed: &[&str],
) -> Result<ApiKey, StatusCode> {
let key = extensions
.get::<ApiKey>()
.cloned()
.ok_or(StatusCode::UNAUTHORIZED)?;
if key.role == "admin" || allowed.contains(&key.role.as_str()) {
Ok(key)
} else {
Err(StatusCode::FORBIDDEN)
}
}
}
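
Until const-generic strings land, the call site looks like this sketch (the
role name is illustrative):

async fn cancel_job(
  extensions: axum::http::Extensions,
) -> Result<&'static str, axum::http::StatusCode> {
  let _key = RequireRoles::check(&extensions, &["cancel-build"])?;
  Ok("cancelled")
}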
/// Session extraction middleware for dashboard routes.
/// Reads `fc_session` cookie and inserts ApiKey into extensions if valid.
pub async fn extract_session(
State(state): State<AppState>,
mut request: Request,
next: Next,
State(state): State<AppState>,
mut request: Request,
next: Next,
) -> Response {
if let Some(cookie_header) = request
.headers()
.get("cookie")
.and_then(|v| v.to_str().ok())
&& let Some(session_id) = parse_cookie(cookie_header, "fc_session")
&& let Some(session) = state.sessions.get(&session_id) {
// Check session expiry (24 hours)
if session.created_at.elapsed() < std::time::Duration::from_secs(24 * 60 * 60) {
request.extensions_mut().insert(session.api_key.clone());
} else {
// Expired, remove it
drop(session);
state.sessions.remove(&session_id);
}
}
next.run(request).await
if let Some(cookie_header) = request
.headers()
.get("cookie")
.and_then(|v| v.to_str().ok())
&& let Some(session_id) = parse_cookie(cookie_header, "fc_session")
&& let Some(session) = state.sessions.get(&session_id)
{
// Check session expiry (24 hours)
if session.created_at.elapsed()
< std::time::Duration::from_secs(24 * 60 * 60)
{
request.extensions_mut().insert(session.api_key.clone());
} else {
// Expired, remove it
drop(session);
state.sessions.remove(&session_id);
}
}
next.run(request).await
}
fn parse_cookie(header: &str, name: &str) -> Option<String> {
header
.split(';')
.filter_map(|pair| {
let pair = pair.trim();
let (k, v) = pair.split_once('=')?;
if k.trim() == name {
Some(v.trim().to_string())
} else {
None
}
})
.next()
header
.split(';')
.filter_map(|pair| {
let pair = pair.trim();
let (k, v) = pair.split_once('=')?;
if k.trim() == name {
Some(v.trim().to_string())
} else {
None
}
})
.next()
}
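
A quick illustrative check of the parsing behavior:

#[cfg(test)]
mod cookie_tests {
  use super::parse_cookie;

  #[test]
  fn picks_the_named_cookie() {
    let header = "theme=dark; fc_session=abc123; lang=en";
    assert_eq!(
      parse_cookie(header, "fc_session"),
      Some("abc123".to_string())
    );
    assert_eq!(parse_cookie(header, "missing"), None);
  }
}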


@@ -1,6 +1,6 @@
use axum::{
http::StatusCode,
response::{IntoResponse, Response},
http::StatusCode,
response::{IntoResponse, Response},
};
use fc_common::CiError;
use serde_json::json;
@@ -8,75 +8,91 @@ use serde_json::json;
pub struct ApiError(pub CiError);
impl From<CiError> for ApiError {
fn from(err: CiError) -> Self {
ApiError(err)
}
fn from(err: CiError) -> Self {
ApiError(err)
}
}
impl IntoResponse for ApiError {
fn into_response(self) -> Response {
let (status, code, message) = match &self.0 {
CiError::NotFound(msg) => (StatusCode::NOT_FOUND, "NOT_FOUND", msg.clone()),
CiError::Validation(msg) => (StatusCode::BAD_REQUEST, "VALIDATION_ERROR", msg.clone()),
CiError::Conflict(msg) => (StatusCode::CONFLICT, "CONFLICT", msg.clone()),
CiError::Timeout(msg) => (StatusCode::REQUEST_TIMEOUT, "TIMEOUT", msg.clone()),
CiError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, "UNAUTHORIZED", msg.clone()),
CiError::Forbidden(msg) => (StatusCode::FORBIDDEN, "FORBIDDEN", msg.clone()),
CiError::NixEval(msg) => (
StatusCode::UNPROCESSABLE_ENTITY,
"NIX_EVAL_ERROR",
msg.clone(),
),
CiError::Build(msg) => (StatusCode::UNPROCESSABLE_ENTITY, "BUILD_ERROR", msg.clone()),
CiError::Config(msg) => (
StatusCode::INTERNAL_SERVER_ERROR,
"CONFIG_ERROR",
msg.clone(),
),
CiError::Database(e) => {
tracing::error!(error = %e, "Database error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"DATABASE_ERROR",
"Internal database error".to_string(),
)
}
CiError::Git(e) => {
tracing::error!(error = %e, "Git error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"GIT_ERROR",
format!("Git operation failed: {e}"),
)
}
CiError::Serialization(e) => {
tracing::error!(error = %e, "Serialization error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"SERIALIZATION_ERROR",
format!("Data serialization error: {e}"),
)
}
CiError::Io(e) => {
tracing::error!(error = %e, "IO error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"IO_ERROR",
format!("IO error: {e}"),
)
}
};
fn into_response(self) -> Response {
let (status, code, message) = match &self.0 {
CiError::NotFound(msg) => {
(StatusCode::NOT_FOUND, "NOT_FOUND", msg.clone())
},
CiError::Validation(msg) => {
(StatusCode::BAD_REQUEST, "VALIDATION_ERROR", msg.clone())
},
CiError::Conflict(msg) => (StatusCode::CONFLICT, "CONFLICT", msg.clone()),
CiError::Timeout(msg) => {
(StatusCode::REQUEST_TIMEOUT, "TIMEOUT", msg.clone())
},
CiError::Unauthorized(msg) => {
(StatusCode::UNAUTHORIZED, "UNAUTHORIZED", msg.clone())
},
CiError::Forbidden(msg) => {
(StatusCode::FORBIDDEN, "FORBIDDEN", msg.clone())
},
CiError::NixEval(msg) => {
(
StatusCode::UNPROCESSABLE_ENTITY,
"NIX_EVAL_ERROR",
msg.clone(),
)
},
CiError::Build(msg) => {
(StatusCode::UNPROCESSABLE_ENTITY, "BUILD_ERROR", msg.clone())
},
CiError::Config(msg) => {
(
StatusCode::INTERNAL_SERVER_ERROR,
"CONFIG_ERROR",
msg.clone(),
)
},
CiError::Database(e) => {
tracing::error!(error = %e, "Database error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"DATABASE_ERROR",
"Internal database error".to_string(),
)
},
CiError::Git(e) => {
tracing::error!(error = %e, "Git error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"GIT_ERROR",
format!("Git operation failed: {e}"),
)
},
CiError::Serialization(e) => {
tracing::error!(error = %e, "Serialization error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"SERIALIZATION_ERROR",
format!("Data serialization error: {e}"),
)
},
CiError::Io(e) => {
tracing::error!(error = %e, "IO error in API handler");
(
StatusCode::INTERNAL_SERVER_ERROR,
"IO_ERROR",
format!("IO error: {e}"),
)
},
};
if status.is_server_error() {
tracing::warn!(
status = %status,
code = code,
"API error response: {}",
message
);
}
let body = axum::Json(json!({ "error": message, "error_code": code }));
(status, body).into_response()
if status.is_server_error() {
tracing::warn!(
status = %status,
code = code,
"API error response: {}",
message
);
}
let body = axum::Json(json!({ "error": message, "error_code": code }));
(status, body).into_response()
}
}
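
Every variant therefore maps to a stable JSON envelope; an illustrative
check, with status and body values following the match above:

#[cfg(test)]
mod error_tests {
  use super::*;

  #[test]
  fn not_found_maps_to_404() {
    let resp =
      ApiError(CiError::NotFound("project not found".into())).into_response();
    // Body: {"error": "project not found", "error_code": "NOT_FOUND"}
    assert_eq!(resp.status(), StatusCode::NOT_FOUND);
  }
}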


@@ -1,8 +1,6 @@
use fc_server::routes;
use fc_server::state;
use clap::Parser;
use fc_common::{Config, Database};
use fc_server::{routes, state};
use state::AppState;
use tokio::net::TcpListener;
@@ -10,73 +8,73 @@ use tokio::net::TcpListener;
#[command(name = "fc-server")]
#[command(about = "CI Server - Web API and UI")]
struct Cli {
#[arg(short = 'H', long)]
host: Option<String>,
#[arg(short = 'H', long)]
host: Option<String>,
#[arg(short, long)]
port: Option<u16>,
#[arg(short, long)]
port: Option<u16>,
}
async fn shutdown_signal() {
let ctrl_c = async {
tokio::signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
let ctrl_c = async {
tokio::signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
let terminate = async {
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
.expect("failed to install SIGTERM handler")
.recv()
.await;
};
#[cfg(unix)]
let terminate = async {
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
.expect("failed to install SIGTERM handler")
.recv()
.await;
};
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
tokio::select! {
() = ctrl_c => {},
() = terminate => {},
}
tokio::select! {
() = ctrl_c => {},
() = terminate => {},
}
tracing::info!("Shutdown signal received");
tracing::info!("Shutdown signal received");
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let config = Config::load()?;
fc_common::init_tracing(&config.tracing);
let config = Config::load()?;
fc_common::init_tracing(&config.tracing);
let cli = Cli::parse();
let cli = Cli::parse();
let host = cli.host.unwrap_or(config.server.host.clone());
let port = cli.port.unwrap_or(config.server.port);
let host = cli.host.unwrap_or(config.server.host.clone());
let port = cli.port.unwrap_or(config.server.port);
let db = Database::new(config.database.clone()).await?;
let db = Database::new(config.database.clone()).await?;
// Bootstrap declarative projects, jobsets, and API keys from config
fc_common::bootstrap::run(db.pool(), &config.declarative).await?;
// Bootstrap declarative projects, jobsets, and API keys from config
fc_common::bootstrap::run(db.pool(), &config.declarative).await?;
let state = AppState {
pool: db.pool().clone(),
config: config.clone(),
sessions: std::sync::Arc::new(dashmap::DashMap::new()),
};
let state = AppState {
pool: db.pool().clone(),
config: config.clone(),
sessions: std::sync::Arc::new(dashmap::DashMap::new()),
};
let app = routes::router(state, &config.server);
let app = routes::router(state, &config.server);
let bind_addr = format!("{host}:{port}");
tracing::info!("Starting CI Server on {}", bind_addr);
let bind_addr = format!("{host}:{port}");
tracing::info!("Starting CI Server on {}", bind_addr);
let listener = TcpListener::bind(&bind_addr).await?;
let app = app.into_make_service_with_connect_info::<std::net::SocketAddr>();
axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await?;
let listener = TcpListener::bind(&bind_addr).await?;
let app = app.into_make_service_with_connect_info::<std::net::SocketAddr>();
axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await?;
tracing::info!("Server shutting down, closing database pool");
db.close().await;
tracing::info!("Server shutting down, closing database pool");
db.close().await;
Ok(())
Ok(())
}


@@ -1,125 +1,132 @@
use axum::{
Json, Router,
extract::{Path, State},
routing::get,
Json,
Router,
extract::{Path, State},
routing::get,
};
use fc_common::{
Validate,
models::{
CreateRemoteBuilder,
RemoteBuilder,
SystemStatus,
UpdateRemoteBuilder,
},
};
use fc_common::Validate;
use fc_common::models::{CreateRemoteBuilder, RemoteBuilder, SystemStatus, UpdateRemoteBuilder};
use uuid::Uuid;
use crate::auth_middleware::RequireAdmin;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{auth_middleware::RequireAdmin, error::ApiError, state::AppState};
async fn list_builders(
State(state): State<AppState>,
State(state): State<AppState>,
) -> Result<Json<Vec<RemoteBuilder>>, ApiError> {
let builders = fc_common::repo::remote_builders::list(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(builders))
let builders = fc_common::repo::remote_builders::list(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(builders))
}
async fn get_builder(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<RemoteBuilder>, ApiError> {
let builder = fc_common::repo::remote_builders::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(builder))
let builder = fc_common::repo::remote_builders::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(builder))
}
async fn create_builder(
_auth: RequireAdmin,
State(state): State<AppState>,
Json(input): Json<CreateRemoteBuilder>,
_auth: RequireAdmin,
State(state): State<AppState>,
Json(input): Json<CreateRemoteBuilder>,
) -> Result<Json<RemoteBuilder>, ApiError> {
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let builder = fc_common::repo::remote_builders::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(builder))
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let builder = fc_common::repo::remote_builders::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(builder))
}
async fn update_builder(
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(input): Json<UpdateRemoteBuilder>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(input): Json<UpdateRemoteBuilder>,
) -> Result<Json<RemoteBuilder>, ApiError> {
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let builder = fc_common::repo::remote_builders::update(&state.pool, id, input)
.await
.map_err(ApiError)?;
Ok(Json(builder))
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let builder =
fc_common::repo::remote_builders::update(&state.pool, id, input)
.await
.map_err(ApiError)?;
Ok(Json(builder))
}
async fn delete_builder(
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
fc_common::repo::remote_builders::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({"deleted": true})))
fc_common::repo::remote_builders::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({"deleted": true})))
}
async fn system_status(
_auth: RequireAdmin,
State(state): State<AppState>,
_auth: RequireAdmin,
State(state): State<AppState>,
) -> Result<Json<SystemStatus>, ApiError> {
let pool = &state.pool;
let pool = &state.pool;
let projects: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM projects")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let jobsets: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM jobsets")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let evaluations: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM evaluations")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let projects: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM projects")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let jobsets: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM jobsets")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let evaluations: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM evaluations")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let stats = fc_common::repo::builds::get_stats(pool)
.await
.map_err(ApiError)?;
let builders = fc_common::repo::remote_builders::count(pool)
.await
.map_err(ApiError)?;
let stats = fc_common::repo::builds::get_stats(pool)
.await
.map_err(ApiError)?;
let builders = fc_common::repo::remote_builders::count(pool)
.await
.map_err(ApiError)?;
let channels: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM channels")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let channels: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM channels")
.fetch_one(pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
Ok(Json(SystemStatus {
projects_count: projects.0,
jobsets_count: jobsets.0,
evaluations_count: evaluations.0,
builds_pending: stats.pending_builds.unwrap_or(0),
builds_running: stats.running_builds.unwrap_or(0),
builds_completed: stats.completed_builds.unwrap_or(0),
builds_failed: stats.failed_builds.unwrap_or(0),
remote_builders: builders,
channels_count: channels.0,
}))
Ok(Json(SystemStatus {
projects_count: projects.0,
jobsets_count: jobsets.0,
evaluations_count: evaluations.0,
builds_pending: stats.pending_builds.unwrap_or(0),
builds_running: stats.running_builds.unwrap_or(0),
builds_completed: stats.completed_builds.unwrap_or(0),
builds_failed: stats.failed_builds.unwrap_or(0),
remote_builders: builders,
channels_count: channels.0,
}))
}
pub fn router() -> Router<AppState> {
Router::new()
.route("/admin/builders", get(list_builders).post(create_builder))
.route(
"/admin/builders/{id}",
get(get_builder).put(update_builder).delete(delete_builder),
)
.route("/admin/system", get(system_status))
Router::new()
.route("/admin/builders", get(list_builders).post(create_builder))
.route(
"/admin/builders/{id}",
get(get_builder).put(update_builder).delete(delete_builder),
)
.route("/admin/system", get(system_status))
}
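
Client side, the admin endpoints are plain JSON over HTTP; a hedged reqwest
sketch (base URL, port, and key are placeholders):

async fn fetch_system_status() -> anyhow::Result<()> {
  let client = reqwest::Client::new();
  let status: serde_json::Value = client
    .get("http://localhost:3000/admin/system")
    .bearer_auth("fc_<your-key>")
    .send()
    .await?
    .json()
    .await?;
  println!("pending builds: {}", status["builds_pending"]);
  Ok(())
}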


@@ -4,95 +4,96 @@ use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use uuid::Uuid;
use crate::auth_middleware::RequireAdmin;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{auth_middleware::RequireAdmin, error::ApiError, state::AppState};
#[derive(Debug, Deserialize)]
pub struct CreateApiKeyRequest {
pub name: String,
pub role: Option<String>,
pub name: String,
pub role: Option<String>,
}
#[derive(Debug, Serialize)]
pub struct CreateApiKeyResponse {
pub id: Uuid,
pub name: String,
pub key: String,
pub role: String,
pub id: Uuid,
pub name: String,
pub key: String,
pub role: String,
}
#[derive(Debug, Serialize)]
pub struct ApiKeyInfo {
pub id: Uuid,
pub name: String,
pub role: String,
pub created_at: chrono::DateTime<chrono::Utc>,
pub last_used_at: Option<chrono::DateTime<chrono::Utc>>,
pub id: Uuid,
pub name: String,
pub role: String,
pub created_at: chrono::DateTime<chrono::Utc>,
pub last_used_at: Option<chrono::DateTime<chrono::Utc>>,
}
pub fn hash_api_key(key: &str) -> String {
let mut hasher = Sha256::new();
hasher.update(key.as_bytes());
hex::encode(hasher.finalize())
let mut hasher = Sha256::new();
hasher.update(key.as_bytes());
hex::encode(hasher.finalize())
}
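
The stored hash is the lowercase hex SHA-256 of the raw key, so it is
deterministic and fixed-width; an illustrative check:

#[cfg(test)]
mod hash_tests {
  use super::hash_api_key;

  #[test]
  fn digest_is_64_lowercase_hex_chars() {
    let h = hash_api_key("fc_example");
    assert_eq!(h.len(), 64);
    assert!(h.chars().all(|c| c.is_ascii_hexdigit()));
    assert_eq!(h, hash_api_key("fc_example")); // deterministic
  }
}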
async fn create_api_key(
_auth: RequireAdmin,
State(state): State<AppState>,
Json(input): Json<CreateApiKeyRequest>,
_auth: RequireAdmin,
State(state): State<AppState>,
Json(input): Json<CreateApiKeyRequest>,
) -> Result<Json<CreateApiKeyResponse>, ApiError> {
let role = input.role.unwrap_or_else(|| "read-only".to_string());
let role = input.role.unwrap_or_else(|| "read-only".to_string());
// Generate a random API key
let key = format!("fc_{}", Uuid::new_v4().to_string().replace('-', ""));
let key_hash = hash_api_key(&key);
// Generate a random API key
let key = format!("fc_{}", Uuid::new_v4().to_string().replace('-', ""));
let key_hash = hash_api_key(&key);
let api_key = repo::api_keys::create(&state.pool, &input.name, &key_hash, &role)
.await
.map_err(ApiError)?;
let api_key =
repo::api_keys::create(&state.pool, &input.name, &key_hash, &role)
.await
.map_err(ApiError)?;
Ok(Json(CreateApiKeyResponse {
id: api_key.id,
name: api_key.name,
key, // Only returned once at creation time
role: api_key.role,
}))
Ok(Json(CreateApiKeyResponse {
id: api_key.id,
name: api_key.name,
key, // Only returned once at creation time
role: api_key.role,
}))
}
async fn list_api_keys(
_auth: RequireAdmin,
State(state): State<AppState>,
_auth: RequireAdmin,
State(state): State<AppState>,
) -> Result<Json<Vec<ApiKeyInfo>>, ApiError> {
let keys = repo::api_keys::list(&state.pool).await.map_err(ApiError)?;
let keys = repo::api_keys::list(&state.pool).await.map_err(ApiError)?;
let infos: Vec<ApiKeyInfo> = keys
.into_iter()
.map(|k| ApiKeyInfo {
id: k.id,
name: k.name,
role: k.role,
created_at: k.created_at,
last_used_at: k.last_used_at,
})
.collect();
let infos: Vec<ApiKeyInfo> = keys
.into_iter()
.map(|k| {
ApiKeyInfo {
id: k.id,
name: k.name,
role: k.role,
created_at: k.created_at,
last_used_at: k.last_used_at,
}
})
.collect();
Ok(Json(infos))
Ok(Json(infos))
}
async fn delete_api_key(
_auth: RequireAdmin,
State(state): State<AppState>,
axum::extract::Path(id): axum::extract::Path<Uuid>,
_auth: RequireAdmin,
State(state): State<AppState>,
axum::extract::Path(id): axum::extract::Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
repo::api_keys::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
repo::api_keys::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
}
pub fn router() -> Router<AppState> {
Router::new()
.route("/api-keys", get(list_api_keys).post(create_api_key))
.route("/api-keys/{id}", axum::routing::delete(delete_api_key))
Router::new()
.route("/api-keys", get(list_api_keys).post(create_api_key))
.route("/api-keys/{id}", axum::routing::delete(delete_api_key))
}


@@ -1,171 +1,202 @@
use axum::{
Router,
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
Router,
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
};
use crate::error::ApiError;
use crate::state::AppState;
use crate::{error::ApiError, state::AppState};
async fn build_badge(
State(state): State<AppState>,
Path((project_name, jobset_name, job_name)): Path<(String, String, String)>,
State(state): State<AppState>,
Path((project_name, jobset_name, job_name)): Path<(String, String, String)>,
) -> Result<Response, ApiError> {
// Find the project
let project = fc_common::repo::projects::get_by_name(&state.pool, &project_name)
.await
.map_err(ApiError)?;
// Find the project
let project =
fc_common::repo::projects::get_by_name(&state.pool, &project_name)
.await
.map_err(ApiError)?;
// Find the jobset
let jobsets = fc_common::repo::jobsets::list_for_project(&state.pool, project.id, 1000, 0)
.await
.map_err(ApiError)?;
// Find the jobset
let jobsets = fc_common::repo::jobsets::list_for_project(
&state.pool,
project.id,
1000,
0,
)
.await
.map_err(ApiError)?;
let jobset = jobsets.iter().find(|j| j.name == jobset_name);
let jobset = match jobset {
Some(j) => j,
None => {
return Ok(shield_svg("build", "not found", "#9f9f9f").into_response());
}
};
let jobset = jobsets.iter().find(|j| j.name == jobset_name);
let jobset = match jobset {
Some(j) => j,
None => {
return Ok(shield_svg("build", "not found", "#9f9f9f").into_response());
},
};
// Get latest evaluation
let eval = fc_common::repo::evaluations::get_latest(&state.pool, jobset.id)
.await
.map_err(ApiError)?;
// Get latest evaluation
let eval = fc_common::repo::evaluations::get_latest(&state.pool, jobset.id)
.await
.map_err(ApiError)?;
let eval = match eval {
Some(e) => e,
None => {
return Ok(shield_svg("build", "no evaluations", "#9f9f9f").into_response());
}
};
let eval = match eval {
Some(e) => e,
None => {
return Ok(
shield_svg("build", "no evaluations", "#9f9f9f").into_response(),
);
},
};
// Find the build for this job
let builds = fc_common::repo::builds::list_for_evaluation(&state.pool, eval.id)
.await
.map_err(ApiError)?;
// Find the build for this job
let builds =
fc_common::repo::builds::list_for_evaluation(&state.pool, eval.id)
.await
.map_err(ApiError)?;
let build = builds.iter().find(|b| b.job_name == job_name);
let build = builds.iter().find(|b| b.job_name == job_name);
let (label, color) = match build {
Some(b) => match b.status {
fc_common::BuildStatus::Completed => ("passing", "#4c1"),
fc_common::BuildStatus::Failed => ("failing", "#e05d44"),
fc_common::BuildStatus::Running => ("building", "#dfb317"),
fc_common::BuildStatus::Pending => ("queued", "#dfb317"),
fc_common::BuildStatus::Cancelled => ("cancelled", "#9f9f9f"),
},
None => ("not found", "#9f9f9f"),
};
let (label, color) = match build {
Some(b) => {
match b.status {
fc_common::BuildStatus::Completed => ("passing", "#4c1"),
fc_common::BuildStatus::Failed => ("failing", "#e05d44"),
fc_common::BuildStatus::Running => ("building", "#dfb317"),
fc_common::BuildStatus::Pending => ("queued", "#dfb317"),
fc_common::BuildStatus::Cancelled => ("cancelled", "#9f9f9f"),
}
},
None => ("not found", "#9f9f9f"),
};
Ok((
StatusCode::OK,
[
("content-type", "image/svg+xml"),
("cache-control", "no-cache, no-store, must-revalidate"),
],
shield_svg("build", label, color),
Ok(
(
StatusCode::OK,
[
("content-type", "image/svg+xml"),
("cache-control", "no-cache, no-store, must-revalidate"),
],
shield_svg("build", label, color),
)
.into_response())
.into_response(),
)
}
/// Return the latest build for a job (from the most recent evaluation) as JSON.
async fn latest_build(
State(state): State<AppState>,
Path((project_name, jobset_name, job_name)): Path<(String, String, String)>,
State(state): State<AppState>,
Path((project_name, jobset_name, job_name)): Path<(String, String, String)>,
) -> Result<Response, ApiError> {
let project = fc_common::repo::projects::get_by_name(&state.pool, &project_name)
.await
.map_err(ApiError)?;
let project =
fc_common::repo::projects::get_by_name(&state.pool, &project_name)
.await
.map_err(ApiError)?;
let jobsets = fc_common::repo::jobsets::list_for_project(&state.pool, project.id, 1000, 0)
.await
.map_err(ApiError)?;
let jobsets = fc_common::repo::jobsets::list_for_project(
&state.pool,
project.id,
1000,
0,
)
.await
.map_err(ApiError)?;
let jobset = jobsets.iter().find(|j| j.name == jobset_name);
let jobset = match jobset {
Some(j) => j,
None => {
return Ok((StatusCode::NOT_FOUND, "Jobset not found").into_response());
}
};
let jobset = jobsets.iter().find(|j| j.name == jobset_name);
let jobset = match jobset {
Some(j) => j,
None => {
return Ok((StatusCode::NOT_FOUND, "Jobset not found").into_response());
},
};
let eval = fc_common::repo::evaluations::get_latest(&state.pool, jobset.id)
.await
.map_err(ApiError)?;
let eval = fc_common::repo::evaluations::get_latest(&state.pool, jobset.id)
.await
.map_err(ApiError)?;
let eval = match eval {
Some(e) => e,
None => {
return Ok((StatusCode::NOT_FOUND, "No evaluations found").into_response());
}
};
let eval = match eval {
Some(e) => e,
None => {
return Ok(
(StatusCode::NOT_FOUND, "No evaluations found").into_response(),
);
},
};
let builds = fc_common::repo::builds::list_for_evaluation(&state.pool, eval.id)
.await
.map_err(ApiError)?;
let builds =
fc_common::repo::builds::list_for_evaluation(&state.pool, eval.id)
.await
.map_err(ApiError)?;
let build = builds.iter().find(|b| b.job_name == job_name);
match build {
Some(b) => Ok(axum::Json(b.clone()).into_response()),
None => Ok((StatusCode::NOT_FOUND, "Build not found").into_response()),
}
let build = builds.iter().find(|b| b.job_name == job_name);
match build {
Some(b) => Ok(axum::Json(b.clone()).into_response()),
None => Ok((StatusCode::NOT_FOUND, "Build not found").into_response()),
}
}
fn shield_svg(subject: &str, status: &str, color: &str) -> String {
let subject_width = subject.len() * 7 + 10;
let status_width = status.len() * 7 + 10;
let total_width = subject_width + status_width;
let subject_x = subject_width / 2;
let status_x = subject_width + status_width / 2;
let subject_width = subject.len() * 7 + 10;
let status_width = status.len() * 7 + 10;
let total_width = subject_width + status_width;
let subject_x = subject_width / 2;
let status_x = subject_width + status_width / 2;
let mut svg = String::new();
svg.push_str(&format!(
"<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{total_width}\" height=\"20\">\n"
));
svg.push_str(" <linearGradient id=\"b\" x2=\"0\" y2=\"100%\">\n");
svg.push_str(" <stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>\n");
svg.push_str(" <stop offset=\"1\" stop-opacity=\".1\"/>\n");
svg.push_str(" </linearGradient>\n");
svg.push_str(" <mask id=\"a\">\n");
svg.push_str(&format!(
" <rect width=\"{total_width}\" height=\"20\" rx=\"3\" fill=\"#fff\"/>\n"
));
svg.push_str(" </mask>\n");
svg.push_str(" <g mask=\"url(#a)\">\n");
svg.push_str(&format!(
" <rect width=\"{subject_width}\" height=\"20\" fill=\"#555\"/>\n"
));
svg.push_str(&format!(
" <rect x=\"{subject_width}\" width=\"{status_width}\" height=\"20\" fill=\"{color}\"/>\n"
));
svg.push_str(&format!(
" <rect width=\"{total_width}\" height=\"20\" fill=\"url(#b)\"/>\n"
));
svg.push_str(" </g>\n");
svg.push_str(" <g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"11\">\n");
svg.push_str(&format!(
" <text x=\"{subject_x}\" y=\"15\" fill=\"#010101\" fill-opacity=\".3\">{subject}</text>\n"
));
svg.push_str(&format!(
" <text x=\"{subject_x}\" y=\"14\">{subject}</text>\n"
));
svg.push_str(&format!(
" <text x=\"{status_x}\" y=\"15\" fill=\"#010101\" fill-opacity=\".3\">{status}</text>\n"
));
svg.push_str(&format!(
" <text x=\"{status_x}\" y=\"14\">{status}</text>\n"
));
svg.push_str(" </g>\n");
svg.push_str("</svg>");
svg
let mut svg = String::new();
svg.push_str(&format!(
"<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{total_width}\" \
height=\"20\">\n"
));
svg.push_str(" <linearGradient id=\"b\" x2=\"0\" y2=\"100%\">\n");
svg.push_str(
" <stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>\n",
);
svg.push_str(" <stop offset=\"1\" stop-opacity=\".1\"/>\n");
svg.push_str(" </linearGradient>\n");
svg.push_str(" <mask id=\"a\">\n");
svg.push_str(&format!(
" <rect width=\"{total_width}\" height=\"20\" rx=\"3\" \
fill=\"#fff\"/>\n"
));
svg.push_str(" </mask>\n");
svg.push_str(" <g mask=\"url(#a)\">\n");
svg.push_str(&format!(
" <rect width=\"{subject_width}\" height=\"20\" fill=\"#555\"/>\n"
));
svg.push_str(&format!(
" <rect x=\"{subject_width}\" width=\"{status_width}\" height=\"20\" \
fill=\"{color}\"/>\n"
));
svg.push_str(&format!(
" <rect width=\"{total_width}\" height=\"20\" fill=\"url(#b)\"/>\n"
));
svg.push_str(" </g>\n");
svg.push_str(
" <g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu \
Sans,Verdana,Geneva,sans-serif\" font-size=\"11\">\n",
);
svg.push_str(&format!(
" <text x=\"{subject_x}\" y=\"15\" fill=\"#010101\" \
fill-opacity=\".3\">{subject}</text>\n"
));
svg.push_str(&format!(
" <text x=\"{subject_x}\" y=\"14\">{subject}</text>\n"
));
svg.push_str(&format!(
" <text x=\"{status_x}\" y=\"15\" fill=\"#010101\" \
fill-opacity=\".3\">{status}</text>\n"
));
svg.push_str(&format!(
" <text x=\"{status_x}\" y=\"14\">{status}</text>\n"
));
svg.push_str(" </g>\n");
svg.push_str("</svg>");
svg
}
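
The badge geometry is a simple estimate of 7 px per character plus 10 px of
padding per segment; an illustrative check of the computed widths:

#[cfg(test)]
mod badge_tests {
  use super::shield_svg;

  #[test]
  fn widths_follow_the_per_char_estimate() {
    // "build": 5 * 7 + 10 = 45; "passing": 7 * 7 + 10 = 59; total 104.
    let svg = shield_svg("build", "passing", "#4c1");
    assert!(svg.contains("width=\"104\""));
    assert!(svg.contains("width=\"45\""));
  }
}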
pub fn router() -> Router<AppState> {
Router::new()
.route("/job/{project}/{jobset}/{job}/shield", get(build_badge))
.route("/job/{project}/{jobset}/{job}/latest", get(latest_build))
Router::new()
.route("/job/{project}/{jobset}/{job}/shield", get(build_badge))
.route("/job/{project}/{jobset}/{job}/latest", get(latest_build))
}


@@ -1,305 +1,321 @@
use axum::{
Json, Router,
body::Body,
extract::{Path, Query, State},
http::{Extensions, StatusCode},
response::{IntoResponse, Response},
routing::{get, post},
Json,
Router,
body::Body,
extract::{Path, Query, State},
http::{Extensions, StatusCode},
response::{IntoResponse, Response},
routing::{get, post},
};
use fc_common::{
Build,
BuildProduct,
BuildStep,
PaginatedResponse,
PaginationParams,
};
use fc_common::{Build, BuildProduct, BuildStep, PaginatedResponse, PaginationParams};
use serde::Deserialize;
use uuid::Uuid;
use crate::auth_middleware::RequireRoles;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{auth_middleware::RequireRoles, error::ApiError, state::AppState};
fn check_role(extensions: &Extensions, allowed: &[&str]) -> Result<(), ApiError> {
RequireRoles::check(extensions, allowed)
.map(|_| ())
.map_err(|s| {
ApiError(if s == StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})
fn check_role(
extensions: &Extensions,
allowed: &[&str],
) -> Result<(), ApiError> {
RequireRoles::check(extensions, allowed)
.map(|_| ())
.map_err(|s| {
ApiError(if s == StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})
}
#[derive(Debug, Deserialize)]
struct ListBuildsParams {
evaluation_id: Option<Uuid>,
status: Option<String>,
system: Option<String>,
job_name: Option<String>,
limit: Option<i64>,
offset: Option<i64>,
evaluation_id: Option<Uuid>,
status: Option<String>,
system: Option<String>,
job_name: Option<String>,
limit: Option<i64>,
offset: Option<i64>,
}
async fn list_builds(
State(state): State<AppState>,
Query(params): Query<ListBuildsParams>,
State(state): State<AppState>,
Query(params): Query<ListBuildsParams>,
) -> Result<Json<PaginatedResponse<Build>>, ApiError> {
let pagination = PaginationParams {
limit: params.limit,
offset: params.offset,
};
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::builds::list_filtered(
&state.pool,
params.evaluation_id,
params.status.as_deref(),
params.system.as_deref(),
params.job_name.as_deref(),
limit,
offset,
)
.await
.map_err(ApiError)?;
let total = fc_common::repo::builds::count_filtered(
&state.pool,
params.evaluation_id,
params.status.as_deref(),
params.system.as_deref(),
params.job_name.as_deref(),
)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
let pagination = PaginationParams {
limit: params.limit,
offset: params.offset,
};
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::builds::list_filtered(
&state.pool,
params.evaluation_id,
params.status.as_deref(),
params.system.as_deref(),
params.job_name.as_deref(),
limit,
offset,
)
.await
.map_err(ApiError)?;
let total = fc_common::repo::builds::count_filtered(
&state.pool,
params.evaluation_id,
params.status.as_deref(),
params.system.as_deref(),
params.job_name.as_deref(),
)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
}
async fn get_build(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Build>, ApiError> {
let build = fc_common::repo::builds::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(build))
let build = fc_common::repo::builds::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(build))
}
async fn cancel_build(
extensions: Extensions,
State(state): State<AppState>,
Path(id): Path<Uuid>,
extensions: Extensions,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Vec<Build>>, ApiError> {
check_role(&extensions, &["cancel-build"])?;
let cancelled = fc_common::repo::builds::cancel_cascade(&state.pool, id)
.await
.map_err(ApiError)?;
if cancelled.is_empty() {
return Err(ApiError(fc_common::CiError::NotFound(
"Build not found or not in a cancellable state".to_string(),
)));
}
Ok(Json(cancelled))
check_role(&extensions, &["cancel-build"])?;
let cancelled = fc_common::repo::builds::cancel_cascade(&state.pool, id)
.await
.map_err(ApiError)?;
if cancelled.is_empty() {
return Err(ApiError(fc_common::CiError::NotFound(
"Build not found or not in a cancellable state".to_string(),
)));
}
Ok(Json(cancelled))
}
async fn list_build_steps(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Vec<BuildStep>>, ApiError> {
let steps = fc_common::repo::build_steps::list_for_build(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(steps))
let steps = fc_common::repo::build_steps::list_for_build(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(steps))
}
async fn list_build_products(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Vec<BuildProduct>>, ApiError> {
let products = fc_common::repo::build_products::list_for_build(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(products))
let products =
fc_common::repo::build_products::list_for_build(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(products))
}
async fn build_stats(
State(state): State<AppState>,
State(state): State<AppState>,
) -> Result<Json<fc_common::BuildStats>, ApiError> {
let stats = fc_common::repo::builds::get_stats(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(stats))
let stats = fc_common::repo::builds::get_stats(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(stats))
}
async fn recent_builds(State(state): State<AppState>) -> Result<Json<Vec<Build>>, ApiError> {
let builds = fc_common::repo::builds::list_recent(&state.pool, 20)
.await
.map_err(ApiError)?;
Ok(Json(builds))
async fn recent_builds(
State(state): State<AppState>,
) -> Result<Json<Vec<Build>>, ApiError> {
let builds = fc_common::repo::builds::list_recent(&state.pool, 20)
.await
.map_err(ApiError)?;
Ok(Json(builds))
}
async fn list_project_builds(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Vec<Build>>, ApiError> {
let builds = fc_common::repo::builds::list_for_project(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(builds))
let builds = fc_common::repo::builds::list_for_project(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(builds))
}
async fn restart_build(
extensions: Extensions,
State(state): State<AppState>,
Path(id): Path<Uuid>,
extensions: Extensions,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Build>, ApiError> {
check_role(&extensions, &["restart-jobs"])?;
let build = fc_common::repo::builds::restart(&state.pool, id)
.await
.map_err(ApiError)?;
check_role(&extensions, &["restart-jobs"])?;
let build = fc_common::repo::builds::restart(&state.pool, id)
.await
.map_err(ApiError)?;
tracing::info!(
build_id = %id,
job = %build.job_name,
"Build restarted"
);
tracing::info!(
build_id = %id,
job = %build.job_name,
"Build restarted"
);
Ok(Json(build))
Ok(Json(build))
}
async fn bump_build(
extensions: Extensions,
State(state): State<AppState>,
Path(id): Path<Uuid>,
extensions: Extensions,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Build>, ApiError> {
check_role(&extensions, &["bump-to-front"])?;
let build = sqlx::query_as::<_, Build>(
"UPDATE builds SET priority = priority + 10 WHERE id = $1 AND status = 'pending' RETURNING *",
)
.bind(id)
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?
.ok_or_else(|| {
ApiError(fc_common::CiError::Validation(
"Build not found or not in pending state".to_string(),
))
})?;
check_role(&extensions, &["bump-to-front"])?;
let build = sqlx::query_as::<_, Build>(
"UPDATE builds SET priority = priority + 10 WHERE id = $1 AND status = \
'pending' RETURNING *",
)
.bind(id)
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?
.ok_or_else(|| {
ApiError(fc_common::CiError::Validation(
"Build not found or not in pending state".to_string(),
))
})?;
Ok(Json(build))
Ok(Json(build))
}
async fn download_build_product(
State(state): State<AppState>,
Path((build_id, product_id)): Path<(Uuid, Uuid)>,
State(state): State<AppState>,
Path((build_id, product_id)): Path<(Uuid, Uuid)>,
) -> Result<Response, ApiError> {
// Verify build exists
let _build = fc_common::repo::builds::get(&state.pool, build_id)
.await
.map_err(ApiError)?;
// Verify build exists
let _build = fc_common::repo::builds::get(&state.pool, build_id)
.await
.map_err(ApiError)?;
let product = fc_common::repo::build_products::get(&state.pool, product_id)
.await
.map_err(ApiError)?;
let product = fc_common::repo::build_products::get(&state.pool, product_id)
.await
.map_err(ApiError)?;
if product.build_id != build_id {
return Err(ApiError(fc_common::CiError::NotFound(
"Product does not belong to this build".to_string(),
if product.build_id != build_id {
return Err(ApiError(fc_common::CiError::NotFound(
"Product does not belong to this build".to_string(),
)));
}
if !fc_common::validate::is_valid_store_path(&product.path) {
return Err(ApiError(fc_common::CiError::Validation(
"Invalid store path".to_string(),
)));
}
if product.is_directory {
// Stream as NAR using nix store dump-path
let child = tokio::process::Command::new("nix")
.args(["store", "dump-path", &product.path])
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::null())
.spawn();
let mut child = match child {
Ok(c) => c,
Err(e) => {
return Err(ApiError(fc_common::CiError::Build(format!(
"Failed to dump path: {e}"
))));
},
};
let stdout = match child.stdout.take() {
Some(s) => s,
None => {
return Err(ApiError(fc_common::CiError::Build(
"Failed to capture output".to_string(),
)));
}
},
};
if !fc_common::validate::is_valid_store_path(&product.path) {
return Err(ApiError(fc_common::CiError::Validation(
"Invalid store path".to_string(),
)));
}
let stream = tokio_util::io::ReaderStream::new(stdout);
let body = Body::from_stream(stream);
if product.is_directory {
// Stream as NAR using nix store dump-path
let child = tokio::process::Command::new("nix")
.args(["store", "dump-path", &product.path])
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::null())
.spawn();
let filename = product.path.rsplit('/').next().unwrap_or(&product.name);
let mut child = match child {
Ok(c) => c,
Err(e) => {
return Err(ApiError(fc_common::CiError::Build(format!(
"Failed to dump path: {e}"
))));
}
};
Ok(
(
StatusCode::OK,
[
("content-type", "application/x-nix-nar"),
(
"content-disposition",
&format!("attachment; filename=\"{filename}.nar\""),
),
],
body,
)
.into_response(),
)
} else {
// Serve file directly
let file = tokio::fs::File::open(&product.path)
.await
.map_err(|e| ApiError(fc_common::CiError::Io(e)))?;
let stdout = match child.stdout.take() {
Some(s) => s,
None => {
return Err(ApiError(fc_common::CiError::Build(
"Failed to capture output".to_string(),
)));
}
};
let stream = tokio_util::io::ReaderStream::new(file);
let body = Body::from_stream(stream);
let stream = tokio_util::io::ReaderStream::new(stdout);
let body = Body::from_stream(stream);
let content_type = product
.content_type
.as_deref()
.unwrap_or("application/octet-stream");
let filename = product.path.rsplit('/').next().unwrap_or(&product.name);
let filename = product.path.rsplit('/').next().unwrap_or(&product.name);
Ok((
StatusCode::OK,
[
("content-type", "application/x-nix-nar"),
(
"content-disposition",
&format!("attachment; filename=\"{filename}.nar\""),
),
],
body,
)
.into_response())
} else {
// Serve file directly
let file = tokio::fs::File::open(&product.path)
.await
.map_err(|e| ApiError(fc_common::CiError::Io(e)))?;
let stream = tokio_util::io::ReaderStream::new(file);
let body = Body::from_stream(stream);
let content_type = product
.content_type
.as_deref()
.unwrap_or("application/octet-stream");
let filename = product.path.rsplit('/').next().unwrap_or(&product.name);
Ok((
StatusCode::OK,
[
("content-type", content_type),
(
"content-disposition",
&format!("attachment; filename=\"{filename}\""),
),
],
body,
)
.into_response())
}
Ok(
(
StatusCode::OK,
[
("content-type", content_type),
(
"content-disposition",
&format!("attachment; filename=\"{filename}\""),
),
],
body,
)
.into_response(),
)
}
}
pub fn router() -> Router<AppState> {
Router::new()
.route("/builds", get(list_builds))
.route("/builds/stats", get(build_stats))
.route("/builds/recent", get(recent_builds))
.route("/builds/{id}", get(get_build))
.route("/builds/{id}/cancel", post(cancel_build))
.route("/builds/{id}/restart", post(restart_build))
.route("/builds/{id}/bump", post(bump_build))
.route("/builds/{id}/steps", get(list_build_steps))
.route("/builds/{id}/products", get(list_build_products))
.route(
"/builds/{build_id}/products/{product_id}/download",
get(download_build_product),
)
.route("/projects/{id}/builds", get(list_project_builds))
Router::new()
.route("/builds", get(list_builds))
.route("/builds/stats", get(build_stats))
.route("/builds/recent", get(recent_builds))
.route("/builds/{id}", get(get_build))
.route("/builds/{id}/cancel", post(cancel_build))
.route("/builds/{id}/restart", post(restart_build))
.route("/builds/{id}/bump", post(bump_build))
.route("/builds/{id}/steps", get(list_build_steps))
.route("/builds/{id}/products", get(list_build_products))
.route(
"/builds/{build_id}/products/{product_id}/download",
get(download_build_product),
)
.route("/projects/{id}/builds", get(list_project_builds))
}
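
A hedged client sketch for the bump endpoint (base URL, port, and key are
placeholders); it needs a key whose role is `bump-to-front` or `admin`:

async fn bump_build_to_front(build_id: uuid::Uuid) -> anyhow::Result<()> {
  let client = reqwest::Client::new();
  client
    .post(format!("http://localhost:3000/builds/{build_id}/bump"))
    .bearer_auth("fc_<your-key>")
    .send()
    .await?
    .error_for_status()?;
  Ok(())
}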


@@ -1,365 +1,369 @@
use axum::{
Router,
body::Body,
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
Router,
body::Body,
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
};
use tokio::process::Command;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{error::ApiError, state::AppState};
/// Serve NARInfo for a store path hash.
/// GET /nix-cache/{hash}.narinfo
async fn narinfo(
State(state): State<AppState>,
Path(hash): Path<String>,
State(state): State<AppState>,
Path(hash): Path<String>,
) -> Result<Response, ApiError> {
if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response());
}
// Strip .narinfo suffix if present
let hash = hash.strip_suffix(".narinfo").unwrap_or(&hash);
// Strip .narinfo suffix if present
let hash = hash.strip_suffix(".narinfo").unwrap_or(&hash);
if !fc_common::validate::is_valid_nix_hash(hash) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !fc_common::validate::is_valid_nix_hash(hash) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
// Look up the store path from build_products by matching the hash prefix
let product = sqlx::query_as::<_, fc_common::models::BuildProduct>(
"SELECT * FROM build_products WHERE path LIKE $1 LIMIT 1",
)
.bind(format!("/nix/store/{hash}-%"))
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
// Look up the store path from build_products by matching the hash prefix
let product = sqlx::query_as::<_, fc_common::models::BuildProduct>(
"SELECT * FROM build_products WHERE path LIKE $1 LIMIT 1",
)
.bind(format!("/nix/store/{hash}-%"))
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let product = match product {
Some(p) => p,
None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let product = match product {
Some(p) => p,
None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
if !fc_common::validate::is_valid_store_path(&product.path) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !fc_common::validate::is_valid_store_path(&product.path) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
// Get narinfo from nix path-info
let output = Command::new("nix")
.args(["path-info", "--json", &product.path])
.output()
.await;
// Get narinfo from nix path-info
let output = Command::new("nix")
.args(["path-info", "--json", &product.path])
.output()
.await;
let output = match output {
Ok(o) if o.status.success() => o,
_ => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let output = match output {
Ok(o) if o.status.success() => o,
_ => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let stdout = String::from_utf8_lossy(&output.stdout);
let parsed: serde_json::Value = match serde_json::from_str(&stdout) {
Ok(v) => v,
Err(_) => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let stdout = String::from_utf8_lossy(&output.stdout);
let parsed: serde_json::Value = match serde_json::from_str(&stdout) {
Ok(v) => v,
Err(_) => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let entry = match parsed.as_array().and_then(|a| a.first()) {
Some(e) => e,
None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let entry = match parsed.as_array().and_then(|a| a.first()) {
Some(e) => e,
None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let nar_hash = entry.get("narHash").and_then(|v| v.as_str()).unwrap_or("");
let nar_size = entry.get("narSize").and_then(|v| v.as_u64()).unwrap_or(0);
let store_path = entry
.get("path")
.and_then(|v| v.as_str())
.unwrap_or(&product.path);
let nar_hash = entry.get("narHash").and_then(|v| v.as_str()).unwrap_or("");
let nar_size = entry.get("narSize").and_then(|v| v.as_u64()).unwrap_or(0);
let store_path = entry
.get("path")
.and_then(|v| v.as_str())
.unwrap_or(&product.path);
let refs: Vec<&str> = entry
.get("references")
.and_then(|v| v.as_array())
.map(|arr| {
arr.iter()
.filter_map(|r| r.as_str())
.map(|s| s.strip_prefix("/nix/store/").unwrap_or(s))
.collect()
})
.unwrap_or_default();
let refs: Vec<&str> = entry
.get("references")
.and_then(|v| v.as_array())
.map(|arr| {
arr
.iter()
.filter_map(|r| r.as_str())
.map(|s| s.strip_prefix("/nix/store/").unwrap_or(s))
.collect()
})
.unwrap_or_default();
// Extract deriver
let deriver = entry
.get("deriver")
.and_then(|v| v.as_str())
.map(|d| d.strip_prefix("/nix/store/").unwrap_or(d));
// Extract deriver
let deriver = entry
.get("deriver")
.and_then(|v| v.as_str())
.map(|d| d.strip_prefix("/nix/store/").unwrap_or(d));
// Extract content-addressable hash
let ca = entry.get("ca").and_then(|v| v.as_str());
// Extract content-addressable hash
let ca = entry.get("ca").and_then(|v| v.as_str());
let file_hash = nar_hash;
let file_hash = nar_hash;
let mut narinfo_text = format!(
"StorePath: {store_path}\n\
URL: nar/{hash}.nar.zst\n\
Compression: zstd\n\
FileHash: {file_hash}\n\
FileSize: {nar_size}\n\
NarHash: {nar_hash}\n\
NarSize: {nar_size}\n\
References: {refs}\n",
store_path = store_path,
hash = hash,
file_hash = file_hash,
nar_size = nar_size,
nar_hash = nar_hash,
refs = refs.join(" "),
);
let mut narinfo_text = format!(
"StorePath: {store_path}\nURL: nar/{hash}.nar.zst\nCompression: \
zstd\nFileHash: {file_hash}\nFileSize: {nar_size}\nNarHash: \
{nar_hash}\nNarSize: {nar_size}\nReferences: {refs}\n",
store_path = store_path,
hash = hash,
file_hash = file_hash,
nar_size = nar_size,
nar_hash = nar_hash,
refs = refs.join(" "),
);
if let Some(deriver) = deriver {
narinfo_text.push_str(&format!("Deriver: {deriver}\n"));
}
if let Some(ca) = ca {
narinfo_text.push_str(&format!("CA: {ca}\n"));
}
if let Some(deriver) = deriver {
narinfo_text.push_str(&format!("Deriver: {deriver}\n"));
}
if let Some(ca) = ca {
narinfo_text.push_str(&format!("CA: {ca}\n"));
}
// Optionally sign if secret key is configured
let narinfo_text = if let Some(ref key_file) = state.config.cache.secret_key_file {
if key_file.exists() {
sign_narinfo(&narinfo_text, key_file).await
} else {
narinfo_text
}
} else {
// Optionally sign if secret key is configured
let narinfo_text =
if let Some(ref key_file) = state.config.cache.secret_key_file {
if key_file.exists() {
sign_narinfo(&narinfo_text, key_file).await
} else {
narinfo_text
}
} else {
narinfo_text
};
Ok((
StatusCode::OK,
[("content-type", "text/x-nix-narinfo")],
narinfo_text,
Ok(
(
StatusCode::OK,
[("content-type", "text/x-nix-narinfo")],
narinfo_text,
)
.into_response())
.into_response(),
)
}
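
For orientation, the assembled text follows the standard narinfo key/value
layout. An entirely made-up example (all values in angle brackets are
placeholders; note that this handler mirrors NarHash/NarSize into
FileHash/FileSize):

  StorePath: /nix/store/<hash>-example-1.0
  URL: nar/<hash>.nar.zst
  Compression: zstd
  FileHash: sha256:<nar-hash>
  FileSize: 123456
  NarHash: sha256:<nar-hash>
  NarSize: 123456
  References: <hash>-glibc-2.39
  Deriver: <hash>-example-1.0.drv
  Sig: cache.example.org-1:<base64-signature>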
/// Sign narinfo using nix store sign command
async fn sign_narinfo(narinfo: &str, key_file: &std::path::Path) -> String {
let store_path = narinfo
.lines()
.find(|l| l.starts_with("StorePath: "))
.and_then(|l| l.strip_prefix("StorePath: "));
let store_path = narinfo
.lines()
.find(|l| l.starts_with("StorePath: "))
.and_then(|l| l.strip_prefix("StorePath: "));
let store_path = match store_path {
Some(p) => p,
None => return narinfo.to_string(),
};
let store_path = match store_path {
Some(p) => p,
None => return narinfo.to_string(),
};
let output = Command::new("nix")
.args([
"store",
"sign",
"--key-file",
&key_file.to_string_lossy(),
store_path,
])
let output = Command::new("nix")
.args([
"store",
"sign",
"--key-file",
&key_file.to_string_lossy(),
store_path,
])
.output()
.await;
match output {
Ok(o) if o.status.success() => {
let re_output = Command::new("nix")
.args(["path-info", "--json", store_path])
.output()
.await;
match output {
Ok(o) if o.status.success() => {
let re_output = Command::new("nix")
.args(["path-info", "--json", store_path])
.output()
.await;
if let Ok(o) = re_output
&& let Ok(parsed) = serde_json::from_slice::<serde_json::Value>(&o.stdout)
&& let Some(sigs) = parsed
.as_array()
.and_then(|a| a.first())
.and_then(|e| e.get("signatures"))
.and_then(|v| v.as_array())
{
let sig_lines: Vec<String> = sigs
.iter()
.filter_map(|s| s.as_str())
.map(|s| format!("Sig: {s}"))
.collect();
if !sig_lines.is_empty() {
return format!("{narinfo}{}\n", sig_lines.join("\n"));
}
}
narinfo.to_string()
if let Ok(o) = re_output
&& let Ok(parsed) =
serde_json::from_slice::<serde_json::Value>(&o.stdout)
&& let Some(sigs) = parsed
.as_array()
.and_then(|a| a.first())
.and_then(|e| e.get("signatures"))
.and_then(|v| v.as_array())
{
let sig_lines: Vec<String> = sigs
.iter()
.filter_map(|s| s.as_str())
.map(|s| format!("Sig: {s}"))
.collect();
if !sig_lines.is_empty() {
return format!("{narinfo}{}\n", sig_lines.join("\n"));
}
_ => narinfo.to_string(),
}
}
narinfo.to_string()
},
_ => narinfo.to_string(),
}
}
/// Serve a compressed NAR file for a store path.
/// GET /nix-cache/nar/{hash}.nar.zst
async fn serve_nar_zst(
State(state): State<AppState>,
Path(hash): Path<String>,
State(state): State<AppState>,
Path(hash): Path<String>,
) -> Result<Response, ApiError> {
if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response());
}
let hash = hash
.strip_suffix(".nar.zst")
.or_else(|| hash.strip_suffix(".nar"))
.unwrap_or(&hash);
let hash = hash
.strip_suffix(".nar.zst")
.or_else(|| hash.strip_suffix(".nar"))
.unwrap_or(&hash);
if !fc_common::validate::is_valid_nix_hash(hash) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !fc_common::validate::is_valid_nix_hash(hash) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
let product = sqlx::query_as::<_, fc_common::models::BuildProduct>(
"SELECT * FROM build_products WHERE path LIKE $1 LIMIT 1",
)
.bind(format!("/nix/store/{hash}-%"))
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let product = sqlx::query_as::<_, fc_common::models::BuildProduct>(
"SELECT * FROM build_products WHERE path LIKE $1 LIMIT 1",
)
.bind(format!("/nix/store/{hash}-%"))
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let product = match product {
Some(p) => p,
None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
let product = match product {
Some(p) => p,
None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
if !fc_common::validate::is_valid_store_path(&product.path) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !fc_common::validate::is_valid_store_path(&product.path) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
// Use two piped processes instead of sh -c to prevent command injection
let mut nix_child = std::process::Command::new("nix")
.args(["store", "dump-path", &product.path])
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::null())
.spawn()
.map_err(|_| {
ApiError(fc_common::CiError::Build(
"Failed to start nix store dump-path".to_string(),
))
})?;
// Use two piped processes instead of sh -c to prevent command injection
let mut nix_child = std::process::Command::new("nix")
.args(["store", "dump-path", &product.path])
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::null())
.spawn()
.map_err(|_| {
ApiError(fc_common::CiError::Build(
"Failed to start nix store dump-path".to_string(),
))
})?;
let nix_stdout = match nix_child.stdout.take() {
Some(s) => s,
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let nix_stdout = match nix_child.stdout.take() {
Some(s) => s,
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let mut zstd_child = Command::new("zstd")
.arg("-c")
.stdin(nix_stdout)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::null())
.spawn()
.map_err(|_| {
ApiError(fc_common::CiError::Build(
"Failed to start zstd compression".to_string(),
))
})?;
let mut zstd_child = Command::new("zstd")
.arg("-c")
.stdin(nix_stdout)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::null())
.spawn()
.map_err(|_| {
ApiError(fc_common::CiError::Build(
"Failed to start zstd compression".to_string(),
))
})?;
let zstd_stdout = match zstd_child.stdout.take() {
Some(s) => s,
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let zstd_stdout = match zstd_child.stdout.take() {
Some(s) => s,
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let stream = tokio_util::io::ReaderStream::new(zstd_stdout);
let body = Body::from_stream(stream);
let stream = tokio_util::io::ReaderStream::new(zstd_stdout);
let body = Body::from_stream(stream);
Ok((StatusCode::OK, [("content-type", "application/zstd")], body).into_response())
Ok(
(StatusCode::OK, [("content-type", "application/zstd")], body)
.into_response(),
)
}
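The two piped processes above avoid the shell entirely: the store path travels as a single argv element and is never parsed by sh. A standalone sketch of the same pattern (illustrative only, assuming nix and zstd are on PATH):

use std::process::{ChildStdout, Command, Stdio};

// Pipe `nix store dump-path` straight into `zstd -c` with no `sh -c`,
// so shell metacharacters in `store_path` are never interpreted.
fn dump_path_zstd(store_path: &str) -> std::io::Result<ChildStdout> {
  let mut dump = Command::new("nix")
    .args(["store", "dump-path", store_path])
    .stdout(Stdio::piped())
    .stderr(Stdio::null())
    .spawn()?;
  let dump_out = dump.stdout.take().expect("stdout piped above");
  let mut zstd = Command::new("zstd")
    .arg("-c")
    .stdin(dump_out) // ChildStdout converts into Stdio
    .stdout(Stdio::piped())
    .stderr(Stdio::null())
    .spawn()?;
  Ok(zstd.stdout.take().expect("stdout piped above"))
}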
/// Serve an uncompressed NAR file for a store path (legacy).
/// GET /nix-cache/nar/{hash}.nar
async fn serve_nar(
State(state): State<AppState>,
Path(hash): Path<String>,
State(state): State<AppState>,
Path(hash): Path<String>,
) -> Result<Response, ApiError> {
if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response());
}
let hash = hash.strip_suffix(".nar").unwrap_or(&hash);
let hash = hash.strip_suffix(".nar").unwrap_or(&hash);
if !fc_common::validate::is_valid_nix_hash(hash) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
if !fc_common::validate::is_valid_nix_hash(hash) {
return Ok(StatusCode::NOT_FOUND.into_response());
}
let product = sqlx::query_as::<_, fc_common::models::BuildProduct>(
    "SELECT * FROM build_products WHERE path LIKE $1 LIMIT 1",
)
.bind(format!("/nix/store/{hash}-%"))
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let product = match product {
    Some(p) => p,
    None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
if !fc_common::validate::is_valid_store_path(&product.path) {
    return Ok(StatusCode::NOT_FOUND.into_response());
}
let child = Command::new("nix")
    .args(["store", "dump-path", &product.path])
    .stdout(std::process::Stdio::piped())
    .stderr(std::process::Stdio::null())
    .spawn();
let mut child = match child {
    Ok(c) => c,
    Err(_) => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let stdout = match child.stdout.take() {
    Some(s) => s,
    None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let stream = tokio_util::io::ReaderStream::new(stdout);
let body = Body::from_stream(stream);
Ok((
    StatusCode::OK,
    [("content-type", "application/x-nix-nar")],
    body,
)
    .into_response())
let product = sqlx::query_as::<_, fc_common::models::BuildProduct>(
  "SELECT * FROM build_products WHERE path LIKE $1 LIMIT 1",
)
.bind(format!("/nix/store/{hash}-%"))
.fetch_optional(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let product = match product {
  Some(p) => p,
  None => return Ok(StatusCode::NOT_FOUND.into_response()),
};
if !fc_common::validate::is_valid_store_path(&product.path) {
  return Ok(StatusCode::NOT_FOUND.into_response());
}
let child = Command::new("nix")
  .args(["store", "dump-path", &product.path])
  .stdout(std::process::Stdio::piped())
  .stderr(std::process::Stdio::null())
  .spawn();
let mut child = match child {
  Ok(c) => c,
  Err(_) => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let stdout = match child.stdout.take() {
  Some(s) => s,
  None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
};
let stream = tokio_util::io::ReaderStream::new(stdout);
let body = Body::from_stream(stream);
Ok(
  (
    StatusCode::OK,
    [("content-type", "application/x-nix-nar")],
    body,
  )
    .into_response(),
)
}
/// Combined NAR handler — dispatches to zstd or plain based on suffix.
/// GET /nix-cache/nar/{hash} where hash includes .nar.zst or .nar suffix
async fn serve_nar_combined(
state: State<AppState>,
path: Path<String>,
state: State<AppState>,
path: Path<String>,
) -> Result<Response, ApiError> {
let hash_raw = path.0.clone();
if hash_raw.ends_with(".nar.zst") {
serve_nar_zst(state, path).await
} else if hash_raw.ends_with(".nar") {
serve_nar(state, path).await
} else {
Ok(StatusCode::NOT_FOUND.into_response())
}
let hash_raw = path.0.clone();
if hash_raw.ends_with(".nar.zst") {
serve_nar_zst(state, path).await
} else if hash_raw.ends_with(".nar") {
serve_nar(state, path).await
} else {
Ok(StatusCode::NOT_FOUND.into_response())
}
}
/// Nix binary cache info endpoint.
/// GET /nix-cache/nix-cache-info
async fn cache_info(State(state): State<AppState>) -> Response {
if !state.config.cache.enabled {
return StatusCode::NOT_FOUND.into_response();
}
if !state.config.cache.enabled {
return StatusCode::NOT_FOUND.into_response();
}
let info = "StoreDir: /nix/store\nWantMassQuery: 1\nPriority: 30\n";
let info = "StoreDir: /nix/store\nWantMassQuery: 1\nPriority: 30\n";
(StatusCode::OK, [("content-type", "text/plain")], info).into_response()
(StatusCode::OK, [("content-type", "text/plain")], info).into_response()
}
pub fn router() -> Router<AppState> {
Router::new()
.route("/nix-cache/nix-cache-info", get(cache_info))
.route("/nix-cache/{hash}", get(narinfo))
.route("/nix-cache/nar/{hash}", get(serve_nar_combined))
Router::new()
.route("/nix-cache/nix-cache-info", get(cache_info))
.route("/nix-cache/{hash}", get(narinfo))
.route("/nix-cache/nar/{hash}", get(serve_nar_combined))
}
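A quick smoke test against the routes registered above; the base URL is an assumption, not part of this diff:

// Fetch the static cache metadata served by `cache_info`.
async fn cache_smoke_test(base: &str) -> reqwest::Result<()> {
  let info = reqwest::get(format!("{base}/nix-cache/nix-cache-info"))
    .await?
    .text()
    .await?;
  assert!(info.starts_with("StoreDir: /nix/store"));
  Ok(())
}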


@@ -1,89 +1,94 @@
use axum::{
Json, Router,
extract::{Path, State},
routing::{get, post},
Json,
Router,
extract::{Path, State},
routing::{get, post},
};
use fc_common::{
Validate,
models::{Channel, CreateChannel},
};
use fc_common::Validate;
use fc_common::models::{Channel, CreateChannel};
use uuid::Uuid;
use crate::auth_middleware::RequireAdmin;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{auth_middleware::RequireAdmin, error::ApiError, state::AppState};
async fn list_channels(State(state): State<AppState>) -> Result<Json<Vec<Channel>>, ApiError> {
let channels = fc_common::repo::channels::list_all(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(channels))
async fn list_channels(
State(state): State<AppState>,
) -> Result<Json<Vec<Channel>>, ApiError> {
let channels = fc_common::repo::channels::list_all(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(channels))
}
async fn list_project_channels(
State(state): State<AppState>,
Path(project_id): Path<Uuid>,
State(state): State<AppState>,
Path(project_id): Path<Uuid>,
) -> Result<Json<Vec<Channel>>, ApiError> {
let channels = fc_common::repo::channels::list_for_project(&state.pool, project_id)
.await
.map_err(ApiError)?;
Ok(Json(channels))
let channels =
fc_common::repo::channels::list_for_project(&state.pool, project_id)
.await
.map_err(ApiError)?;
Ok(Json(channels))
}
async fn get_channel(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Channel>, ApiError> {
let channel = fc_common::repo::channels::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(channel))
let channel = fc_common::repo::channels::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(channel))
}
async fn create_channel(
_auth: RequireAdmin,
State(state): State<AppState>,
Json(input): Json<CreateChannel>,
_auth: RequireAdmin,
State(state): State<AppState>,
Json(input): Json<CreateChannel>,
) -> Result<Json<Channel>, ApiError> {
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let channel = fc_common::repo::channels::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(channel))
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let channel = fc_common::repo::channels::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(channel))
}
async fn delete_channel(
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
fc_common::repo::channels::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({"deleted": true})))
fc_common::repo::channels::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({"deleted": true})))
}
async fn promote_channel(
_auth: RequireAdmin,
State(state): State<AppState>,
Path((channel_id, eval_id)): Path<(Uuid, Uuid)>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path((channel_id, eval_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<Channel>, ApiError> {
let channel = fc_common::repo::channels::promote(&state.pool, channel_id, eval_id)
.await
.map_err(ApiError)?;
Ok(Json(channel))
let channel =
fc_common::repo::channels::promote(&state.pool, channel_id, eval_id)
.await
.map_err(ApiError)?;
Ok(Json(channel))
}
pub fn router() -> Router<AppState> {
Router::new()
.route("/channels", get(list_channels).post(create_channel))
.route("/channels/{id}", get(get_channel).delete(delete_channel))
.route(
"/channels/{channel_id}/promote/{eval_id}",
post(promote_channel),
)
.route(
"/projects/{project_id}/channels",
get(list_project_channels),
)
Router::new()
.route("/channels", get(list_channels).post(create_channel))
.route("/channels/{id}", get(get_channel).delete(delete_channel))
.route(
"/channels/{channel_id}/promote/{eval_id}",
post(promote_channel),
)
.route(
"/projects/{project_id}/channels",
get(list_project_channels),
)
}
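Client-side, promotion is a bare POST against the route above. A hedged sketch (base URL assumed; credentials omitted even though RequireAdmin guards the route server-side):

async fn promote_channel_client(
  base: &str,
  channel: uuid::Uuid,
  eval: uuid::Uuid,
) -> reqwest::Result<()> {
  let url = format!("{base}/channels/{channel}/promote/{eval}");
  reqwest::Client::new().post(url).send().await?.error_for_status()?;
  Ok(())
}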

File diff suppressed because it is too large


@@ -1,212 +1,220 @@
use axum::{
Json, Router,
extract::{Path, Query, State},
http::Extensions,
routing::{get, post},
};
use fc_common::{CreateEvaluation, Evaluation, PaginatedResponse, PaginationParams, Validate};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use axum::{
Json,
Router,
extract::{Path, Query, State},
http::Extensions,
routing::{get, post},
};
use fc_common::{
CreateEvaluation,
Evaluation,
PaginatedResponse,
PaginationParams,
Validate,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::auth_middleware::RequireRoles;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{auth_middleware::RequireRoles, error::ApiError, state::AppState};
#[derive(Debug, Deserialize)]
struct ListEvaluationsParams {
jobset_id: Option<Uuid>,
status: Option<String>,
limit: Option<i64>,
offset: Option<i64>,
jobset_id: Option<Uuid>,
status: Option<String>,
limit: Option<i64>,
offset: Option<i64>,
}
async fn list_evaluations(
State(state): State<AppState>,
Query(params): Query<ListEvaluationsParams>,
State(state): State<AppState>,
Query(params): Query<ListEvaluationsParams>,
) -> Result<Json<PaginatedResponse<Evaluation>>, ApiError> {
let pagination = PaginationParams {
limit: params.limit,
offset: params.offset,
};
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::evaluations::list_filtered(
&state.pool,
params.jobset_id,
params.status.as_deref(),
limit,
offset,
)
.await
.map_err(ApiError)?;
let total = fc_common::repo::evaluations::count_filtered(
&state.pool,
params.jobset_id,
params.status.as_deref(),
)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
let pagination = PaginationParams {
limit: params.limit,
offset: params.offset,
};
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::evaluations::list_filtered(
&state.pool,
params.jobset_id,
params.status.as_deref(),
limit,
offset,
)
.await
.map_err(ApiError)?;
let total = fc_common::repo::evaluations::count_filtered(
&state.pool,
params.jobset_id,
params.status.as_deref(),
)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
}
async fn get_evaluation(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Evaluation>, ApiError> {
let evaluation = fc_common::repo::evaluations::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(evaluation))
let evaluation = fc_common::repo::evaluations::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(evaluation))
}
async fn trigger_evaluation(
extensions: Extensions,
State(state): State<AppState>,
Json(input): Json<CreateEvaluation>,
extensions: Extensions,
State(state): State<AppState>,
Json(input): Json<CreateEvaluation>,
) -> Result<Json<Evaluation>, ApiError> {
RequireRoles::check(&extensions, &["eval-jobset"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let evaluation = fc_common::repo::evaluations::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(evaluation))
RequireRoles::check(&extensions, &["eval-jobset"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let evaluation = fc_common::repo::evaluations::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(evaluation))
}
#[derive(Debug, Deserialize)]
struct CompareParams {
to: Uuid,
to: Uuid,
}
#[derive(Debug, Serialize)]
struct EvalComparison {
from_id: Uuid,
to_id: Uuid,
new_jobs: Vec<JobDiff>,
removed_jobs: Vec<JobDiff>,
changed_jobs: Vec<JobChange>,
unchanged_count: usize,
from_id: Uuid,
to_id: Uuid,
new_jobs: Vec<JobDiff>,
removed_jobs: Vec<JobDiff>,
changed_jobs: Vec<JobChange>,
unchanged_count: usize,
}
#[derive(Debug, Serialize)]
struct JobDiff {
job_name: String,
system: Option<String>,
drv_path: String,
status: String,
job_name: String,
system: Option<String>,
drv_path: String,
status: String,
}
#[derive(Debug, Serialize)]
struct JobChange {
job_name: String,
system: Option<String>,
old_drv: String,
new_drv: String,
old_status: String,
new_status: String,
job_name: String,
system: Option<String>,
old_drv: String,
new_drv: String,
old_status: String,
new_status: String,
}
async fn compare_evaluations(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Query(params): Query<CompareParams>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
Query(params): Query<CompareParams>,
) -> Result<Json<EvalComparison>, ApiError> {
// Verify both evaluations exist
let _from_eval = fc_common::repo::evaluations::get(&state.pool, id)
.await
.map_err(ApiError)?;
let _to_eval = fc_common::repo::evaluations::get(&state.pool, params.to)
.await
.map_err(ApiError)?;
// Verify both evaluations exist
let _from_eval = fc_common::repo::evaluations::get(&state.pool, id)
.await
.map_err(ApiError)?;
let _to_eval = fc_common::repo::evaluations::get(&state.pool, params.to)
.await
.map_err(ApiError)?;
let from_builds = fc_common::repo::builds::list_for_evaluation(&state.pool, id)
.await
.map_err(ApiError)?;
let to_builds = fc_common::repo::builds::list_for_evaluation(&state.pool, params.to)
.await
.map_err(ApiError)?;
let from_builds =
fc_common::repo::builds::list_for_evaluation(&state.pool, id)
.await
.map_err(ApiError)?;
let to_builds =
fc_common::repo::builds::list_for_evaluation(&state.pool, params.to)
.await
.map_err(ApiError)?;
let from_map: HashMap<&str, &fc_common::Build> = from_builds
.iter()
.map(|b| (b.job_name.as_str(), b))
.collect();
let to_map: HashMap<&str, &fc_common::Build> =
to_builds.iter().map(|b| (b.job_name.as_str(), b)).collect();
let from_map: HashMap<&str, &fc_common::Build> = from_builds
.iter()
.map(|b| (b.job_name.as_str(), b))
.collect();
let to_map: HashMap<&str, &fc_common::Build> =
to_builds.iter().map(|b| (b.job_name.as_str(), b)).collect();
let mut new_jobs = Vec::new();
let mut removed_jobs = Vec::new();
let mut changed_jobs = Vec::new();
let mut unchanged_count = 0;
let mut new_jobs = Vec::new();
let mut removed_jobs = Vec::new();
let mut changed_jobs = Vec::new();
let mut unchanged_count = 0;
// Jobs in `to` but not in `from` are new
for (name, build) in &to_map {
if !from_map.contains_key(name) {
new_jobs.push(JobDiff {
job_name: name.to_string(),
system: build.system.clone(),
drv_path: build.drv_path.clone(),
status: format!("{:?}", build.status),
});
}
// Jobs in `to` but not in `from` are new
for (name, build) in &to_map {
if !from_map.contains_key(name) {
new_jobs.push(JobDiff {
job_name: name.to_string(),
system: build.system.clone(),
drv_path: build.drv_path.clone(),
status: format!("{:?}", build.status),
});
}
}
// Jobs in `from` but not in `to` are removed
for (name, build) in &from_map {
if !to_map.contains_key(name) {
removed_jobs.push(JobDiff {
job_name: name.to_string(),
system: build.system.clone(),
drv_path: build.drv_path.clone(),
status: format!("{:?}", build.status),
});
}
// Jobs in `from` but not in `to` are removed
for (name, build) in &from_map {
if !to_map.contains_key(name) {
removed_jobs.push(JobDiff {
job_name: name.to_string(),
system: build.system.clone(),
drv_path: build.drv_path.clone(),
status: format!("{:?}", build.status),
});
}
}
// Jobs in both: compare derivation paths
for (name, from_build) in &from_map {
if let Some(to_build) = to_map.get(name) {
if from_build.drv_path != to_build.drv_path {
changed_jobs.push(JobChange {
job_name: name.to_string(),
system: to_build.system.clone(),
old_drv: from_build.drv_path.clone(),
new_drv: to_build.drv_path.clone(),
old_status: format!("{:?}", from_build.status),
new_status: format!("{:?}", to_build.status),
});
} else {
unchanged_count += 1;
}
}
// Jobs in both: compare derivation paths
for (name, from_build) in &from_map {
if let Some(to_build) = to_map.get(name) {
if from_build.drv_path != to_build.drv_path {
changed_jobs.push(JobChange {
job_name: name.to_string(),
system: to_build.system.clone(),
old_drv: from_build.drv_path.clone(),
new_drv: to_build.drv_path.clone(),
old_status: format!("{:?}", from_build.status),
new_status: format!("{:?}", to_build.status),
});
} else {
unchanged_count += 1;
}
}
}
Ok(Json(EvalComparison {
from_id: id,
to_id: params.to,
new_jobs,
removed_jobs,
changed_jobs,
unchanged_count,
}))
Ok(Json(EvalComparison {
from_id: id,
to_id: params.to,
new_jobs,
removed_jobs,
changed_jobs,
unchanged_count,
}))
}
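The handler above is set algebra on job names, with drv_path as the change signal; a condensed restatement over invented data:

use std::collections::HashMap;

// from = {a: drv1, b: drv2}, to = {b: drv2x, c: drv3}
//   => new = [c], removed = [a], changed = [b], unchanged = 0
fn classify<'a>(
  from: &HashMap<&'a str, &'a str>, // job name -> drv path
  to: &HashMap<&'a str, &'a str>,
) -> (Vec<&'a str>, Vec<&'a str>, Vec<&'a str>, usize) {
  let new = to.keys().filter(|k| !from.contains_key(*k)).copied().collect();
  let removed = from.keys().filter(|k| !to.contains_key(*k)).copied().collect();
  let mut changed = Vec::new();
  let mut unchanged = 0;
  for (name, drv) in from {
    if let Some(to_drv) = to.get(name) {
      if *to_drv == *drv { unchanged += 1 } else { changed.push(*name) }
    }
  }
  (new, removed, changed, unchanged)
}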
pub fn router() -> Router<AppState> {
Router::new()
.route("/evaluations", get(list_evaluations))
.route("/evaluations/{id}", get(get_evaluation))
.route("/evaluations/{id}/compare", get(compare_evaluations))
.route("/evaluations/trigger", post(trigger_evaluation))
Router::new()
.route("/evaluations", get(list_evaluations))
.route("/evaluations/{id}", get(get_evaluation))
.route("/evaluations/{id}/compare", get(compare_evaluations))
.route("/evaluations/trigger", post(trigger_evaluation))
}


@@ -5,24 +5,24 @@ use crate::state::AppState;
#[derive(Serialize)]
struct HealthResponse {
status: &'static str,
database: bool,
status: &'static str,
database: bool,
}
async fn health_check(State(state): State<AppState>) -> Json<HealthResponse> {
let db_ok = sqlx::query_scalar::<_, i32>("SELECT 1")
.fetch_one(&state.pool)
.await
.is_ok();
let db_ok = sqlx::query_scalar::<_, i32>("SELECT 1")
.fetch_one(&state.pool)
.await
.is_ok();
let status = if db_ok { "ok" } else { "degraded" };
let status = if db_ok { "ok" } else { "degraded" };
Json(HealthResponse {
status,
database: db_ok,
})
Json(HealthResponse {
status,
database: db_ok,
})
}
pub fn router() -> Router<AppState> {
Router::new().route("/health", get(health_check))
Router::new().route("/health", get(health_check))
}


@@ -1,114 +1,114 @@
use axum::{
Json, Router,
extract::{Path, State},
routing::get,
Json,
Router,
extract::{Path, State},
routing::get,
};
use fc_common::{Jobset, JobsetInput, UpdateJobset, Validate};
use serde::Deserialize;
use uuid::Uuid;
use crate::auth_middleware::RequireAdmin;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{auth_middleware::RequireAdmin, error::ApiError, state::AppState};
async fn get_jobset(
State(state): State<AppState>,
Path((_project_id, id)): Path<(Uuid, Uuid)>,
State(state): State<AppState>,
Path((_project_id, id)): Path<(Uuid, Uuid)>,
) -> Result<Json<Jobset>, ApiError> {
let jobset = fc_common::repo::jobsets::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(jobset))
let jobset = fc_common::repo::jobsets::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(jobset))
}
async fn update_jobset(
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, id)): Path<(Uuid, Uuid)>,
Json(input): Json<UpdateJobset>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, id)): Path<(Uuid, Uuid)>,
Json(input): Json<UpdateJobset>,
) -> Result<Json<Jobset>, ApiError> {
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let jobset = fc_common::repo::jobsets::update(&state.pool, id, input)
.await
.map_err(ApiError)?;
Ok(Json(jobset))
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let jobset = fc_common::repo::jobsets::update(&state.pool, id, input)
.await
.map_err(ApiError)?;
Ok(Json(jobset))
}
async fn delete_jobset(
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, id)): Path<(Uuid, Uuid)>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, id)): Path<(Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, ApiError> {
fc_common::repo::jobsets::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
fc_common::repo::jobsets::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
}
// --- Jobset input routes ---
async fn list_jobset_inputs(
State(state): State<AppState>,
Path((_project_id, jobset_id)): Path<(Uuid, Uuid)>,
State(state): State<AppState>,
Path((_project_id, jobset_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<Vec<JobsetInput>>, ApiError> {
let inputs = fc_common::repo::jobset_inputs::list_for_jobset(&state.pool, jobset_id)
.await
.map_err(ApiError)?;
Ok(Json(inputs))
let inputs =
fc_common::repo::jobset_inputs::list_for_jobset(&state.pool, jobset_id)
.await
.map_err(ApiError)?;
Ok(Json(inputs))
}
#[derive(Debug, Deserialize)]
struct CreateJobsetInputRequest {
name: String,
input_type: String,
value: String,
revision: Option<String>,
name: String,
input_type: String,
value: String,
revision: Option<String>,
}
async fn create_jobset_input(
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, jobset_id)): Path<(Uuid, Uuid)>,
Json(body): Json<CreateJobsetInputRequest>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, jobset_id)): Path<(Uuid, Uuid)>,
Json(body): Json<CreateJobsetInputRequest>,
) -> Result<Json<JobsetInput>, ApiError> {
let input = fc_common::repo::jobset_inputs::create(
&state.pool,
jobset_id,
&body.name,
&body.input_type,
&body.value,
body.revision.as_deref(),
)
.await
.map_err(ApiError)?;
Ok(Json(input))
let input = fc_common::repo::jobset_inputs::create(
&state.pool,
jobset_id,
&body.name,
&body.input_type,
&body.value,
body.revision.as_deref(),
)
.await
.map_err(ApiError)?;
Ok(Json(input))
}
async fn delete_jobset_input(
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, _jobset_id, input_id)): Path<(Uuid, Uuid, Uuid)>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path((_project_id, _jobset_id, input_id)): Path<(Uuid, Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, ApiError> {
fc_common::repo::jobset_inputs::delete(&state.pool, input_id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
fc_common::repo::jobset_inputs::delete(&state.pool, input_id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
}
pub fn router() -> Router<AppState> {
Router::new()
.route(
"/projects/{project_id}/jobsets/{id}",
get(get_jobset).put(update_jobset).delete(delete_jobset),
)
.route(
"/projects/{project_id}/jobsets/{jobset_id}/inputs",
get(list_jobset_inputs).post(create_jobset_input),
)
.route(
"/projects/{project_id}/jobsets/{jobset_id}/inputs/{input_id}",
axum::routing::delete(delete_jobset_input),
)
Router::new()
.route(
"/projects/{project_id}/jobsets/{id}",
get(get_jobset).put(update_jobset).delete(delete_jobset),
)
.route(
"/projects/{project_id}/jobsets/{jobset_id}/inputs",
get(list_jobset_inputs).post(create_jobset_input),
)
.route(
"/projects/{project_id}/jobsets/{jobset_id}/inputs/{input_id}",
axum::routing::delete(delete_jobset_input),
)
}


@@ -1,125 +1,142 @@
use axum::response::sse::{Event, KeepAlive};
use axum::{
Router,
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Response, Sse},
routing::get,
Router,
extract::{Path, State},
http::StatusCode,
response::{
IntoResponse,
Response,
Sse,
sse::{Event, KeepAlive},
},
routing::get,
};
use uuid::Uuid;
use crate::error::ApiError;
use crate::state::AppState;
use crate::{error::ApiError, state::AppState};
async fn get_build_log(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Response, ApiError> {
// Verify build exists
let _build = fc_common::repo::builds::get(&state.pool, id)
.await
.map_err(ApiError)?;
// Verify build exists
let _build = fc_common::repo::builds::get(&state.pool, id)
.await
.map_err(ApiError)?;
let log_storage = fc_common::log_storage::LogStorage::new(state.config.logs.log_dir.clone())
.map_err(|e| ApiError(fc_common::CiError::Io(e)))?;
let log_storage =
fc_common::log_storage::LogStorage::new(state.config.logs.log_dir.clone())
.map_err(|e| ApiError(fc_common::CiError::Io(e)))?;
match log_storage.read_log(&id) {
    Ok(Some(content)) => Ok((
        StatusCode::OK,
        [("content-type", "text/plain; charset=utf-8")],
        content,
    )
        .into_response()),
    Ok(None) => Ok((StatusCode::NOT_FOUND, "No log available for this build").into_response()),
    Err(e) => Err(ApiError(fc_common::CiError::Io(e))),
}
match log_storage.read_log(&id) {
  Ok(Some(content)) => {
    Ok(
      (
        StatusCode::OK,
        [("content-type", "text/plain; charset=utf-8")],
        content,
      )
        .into_response(),
    )
  },
  Ok(None) => {
    Ok(
      (StatusCode::NOT_FOUND, "No log available for this build")
        .into_response(),
    )
  },
  Err(e) => Err(ApiError(fc_common::CiError::Io(e))),
}
}
async fn stream_build_log(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Sse<impl futures::Stream<Item = Result<Event, std::convert::Infallible>>>, ApiError> {
let build = fc_common::repo::builds::get(&state.pool, id)
.await
.map_err(ApiError)?;
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<
Sse<impl futures::Stream<Item = Result<Event, std::convert::Infallible>>>,
ApiError,
> {
let build = fc_common::repo::builds::get(&state.pool, id)
.await
.map_err(ApiError)?;
let log_storage = fc_common::log_storage::LogStorage::new(state.config.logs.log_dir.clone())
.map_err(|e| ApiError(fc_common::CiError::Io(e)))?;
let log_storage =
fc_common::log_storage::LogStorage::new(state.config.logs.log_dir.clone())
.map_err(|e| ApiError(fc_common::CiError::Io(e)))?;
let active_path = log_storage.log_path_for_active(&id);
let final_path = log_storage.log_path(&id);
let pool = state.pool.clone();
let build_id = build.id;
let active_path = log_storage.log_path_for_active(&id);
let final_path = log_storage.log_path(&id);
let pool = state.pool.clone();
let build_id = build.id;
let stream = async_stream::stream! {
use tokio::io::{AsyncBufReadExt, BufReader};
let stream = async_stream::stream! {
use tokio::io::{AsyncBufReadExt, BufReader};
// Determine which file to read
let path = if active_path.exists() {
active_path.clone()
} else if final_path.exists() {
final_path.clone()
} else {
// Wait for the file to appear
let mut found = false;
for _ in 0..30 {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
if active_path.exists() || final_path.exists() {
found = true;
break;
}
}
if !found {
yield Ok(Event::default().data("No log file available"));
return;
}
if active_path.exists() { active_path.clone() } else { final_path.clone() }
};
// Determine which file to read
let path = if active_path.exists() {
active_path.clone()
} else if final_path.exists() {
final_path.clone()
} else {
// Wait for the file to appear
let mut found = false;
for _ in 0..30 {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
if active_path.exists() || final_path.exists() {
found = true;
break;
}
}
if !found {
yield Ok(Event::default().data("No log file available"));
return;
}
if active_path.exists() { active_path.clone() } else { final_path.clone() }
};
let file = match tokio::fs::File::open(&path).await {
Ok(f) => f,
Err(_) => {
yield Ok(Event::default().data("Failed to open log file"));
return;
}
};
let file = match tokio::fs::File::open(&path).await {
Ok(f) => f,
Err(_) => {
yield Ok(Event::default().data("Failed to open log file"));
return;
}
};
let mut reader = BufReader::new(file);
let mut line = String::new();
let mut consecutive_empty = 0u32;
let mut reader = BufReader::new(file);
let mut line = String::new();
let mut consecutive_empty = 0u32;
loop {
line.clear();
match reader.read_line(&mut line).await {
Ok(0) => {
// EOF — check if build is still running
consecutive_empty += 1;
if consecutive_empty > 5 {
// Check build status
if let Ok(b) = fc_common::repo::builds::get(&pool, build_id).await
&& b.status != fc_common::models::BuildStatus::Running
&& b.status != fc_common::models::BuildStatus::Pending {
yield Ok(Event::default().event("done").data("Build completed"));
return;
}
consecutive_empty = 0;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
Ok(_) => {
consecutive_empty = 0;
yield Ok(Event::default().data(line.trim_end()));
}
Err(_) => return,
}
}
};
loop {
line.clear();
match reader.read_line(&mut line).await {
Ok(0) => {
// EOF — check if build is still running
consecutive_empty += 1;
if consecutive_empty > 5 {
// Check build status
if let Ok(b) = fc_common::repo::builds::get(&pool, build_id).await
&& b.status != fc_common::models::BuildStatus::Running
&& b.status != fc_common::models::BuildStatus::Pending {
yield Ok(Event::default().event("done").data("Build completed"));
return;
}
consecutive_empty = 0;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
Ok(_) => {
consecutive_empty = 0;
yield Ok(Event::default().data(line.trim_end()));
}
Err(_) => return,
}
}
};
Ok(Sse::new(stream).keep_alive(KeepAlive::default()))
Ok(Sse::new(stream).keep_alive(KeepAlive::default()))
}
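On the wire, the stream above is plain SSE; an illustrative session (log lines invented):

// data: unpacking sources
// data: building '/nix/store/...-foo.drv'
// event: done
// data: Build completed
//
// KeepAlive::default() additionally emits periodic comment frames so proxies
// do not drop the idle connection.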
pub fn router() -> Router<AppState> {
Router::new()
.route("/builds/{id}/log", get(get_build_log))
.route("/builds/{id}/log/stream", get(stream_build_log))
Router::new()
.route("/builds/{id}/log", get(get_build_log))
.route("/builds/{id}/log/stream", get(stream_build_log))
}


@@ -1,188 +1,198 @@
use axum::{
Router,
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
Router,
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
routing::get,
};
use crate::state::AppState;
async fn prometheus_metrics(State(state): State<AppState>) -> Response {
let stats = match fc_common::repo::builds::get_stats(&state.pool).await {
Ok(s) => s,
Err(_) => {
return StatusCode::INTERNAL_SERVER_ERROR.into_response();
}
};
let stats = match fc_common::repo::builds::get_stats(&state.pool).await {
Ok(s) => s,
Err(_) => {
return StatusCode::INTERNAL_SERVER_ERROR.into_response();
},
};
let eval_count: i64 = match sqlx::query_as::<_, (i64,)>("SELECT COUNT(*) FROM evaluations")
.fetch_one(&state.pool)
.await
let eval_count: i64 =
match sqlx::query_as::<_, (i64,)>("SELECT COUNT(*) FROM evaluations")
.fetch_one(&state.pool)
.await
{
Ok(row) => row.0,
Err(_) => 0,
Ok(row) => row.0,
Err(_) => 0,
};
let eval_by_status: Vec<(String, i64)> =
sqlx::query_as("SELECT status::text, COUNT(*) FROM evaluations GROUP BY status")
.fetch_all(&state.pool)
.await
.unwrap_or_default();
let eval_by_status: Vec<(String, i64)> = sqlx::query_as(
"SELECT status::text, COUNT(*) FROM evaluations GROUP BY status",
)
.fetch_all(&state.pool)
.await
.unwrap_or_default();
let (project_count, channel_count, builder_count): (i64, i64, i64) = sqlx::query_as(
"SELECT \
(SELECT COUNT(*) FROM projects), \
(SELECT COUNT(*) FROM channels), \
(SELECT COUNT(*) FROM remote_builders WHERE enabled = true)",
let (project_count, channel_count, builder_count): (i64, i64, i64) =
sqlx::query_as(
"SELECT (SELECT COUNT(*) FROM projects), (SELECT COUNT(*) FROM \
channels), (SELECT COUNT(*) FROM remote_builders WHERE enabled = true)",
)
.fetch_one(&state.pool)
.await
.unwrap_or((0, 0, 0));
// Per-project build counts
let per_project: Vec<(String, i64, i64)> = sqlx::query_as(
"SELECT p.name, \
COUNT(*) FILTER (WHERE b.status = 'completed'), \
COUNT(*) FILTER (WHERE b.status = 'failed') \
FROM builds b \
JOIN evaluations e ON b.evaluation_id = e.id \
JOIN jobsets j ON e.jobset_id = j.id \
JOIN projects p ON j.project_id = p.id \
GROUP BY p.name",
)
.fetch_all(&state.pool)
.await
.unwrap_or_default();
// Per-project build counts
let per_project: Vec<(String, i64, i64)> = sqlx::query_as(
"SELECT p.name, COUNT(*) FILTER (WHERE b.status = 'completed'), COUNT(*) \
FILTER (WHERE b.status = 'failed') FROM builds b JOIN evaluations e ON \
b.evaluation_id = e.id JOIN jobsets j ON e.jobset_id = j.id JOIN \
projects p ON j.project_id = p.id GROUP BY p.name",
)
.fetch_all(&state.pool)
.await
.unwrap_or_default();
// Build duration percentiles (single query)
let (duration_p50, duration_p95, duration_p99): (Option<f64>, Option<f64>, Option<f64>) =
sqlx::query_as(
"SELECT \
(PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY \
EXTRACT(EPOCH FROM (completed_at - started_at)))), \
(PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY \
EXTRACT(EPOCH FROM (completed_at - started_at)))), \
(PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY \
EXTRACT(EPOCH FROM (completed_at - started_at)))) \
FROM builds WHERE completed_at IS NOT NULL AND started_at IS NOT NULL",
)
.fetch_one(&state.pool)
.await
.unwrap_or((None, None, None));
// Build duration percentiles (single query)
let (duration_p50, duration_p95, duration_p99): (
Option<f64>,
Option<f64>,
Option<f64>,
) = sqlx::query_as(
"SELECT (PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM \
(completed_at - started_at)))), (PERCENTILE_CONT(0.95) WITHIN GROUP \
(ORDER BY EXTRACT(EPOCH FROM (completed_at - started_at)))), \
(PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY EXTRACT(EPOCH FROM \
(completed_at - started_at)))) FROM builds WHERE completed_at IS NOT \
NULL AND started_at IS NOT NULL",
)
.fetch_one(&state.pool)
.await
.unwrap_or((None, None, None));
let mut output = String::new();
let mut output = String::new();
// Build counts by status
output.push_str("# HELP fc_builds_total Total number of builds by status\n");
output.push_str("# TYPE fc_builds_total gauge\n");
output.push_str(&format!(
"fc_builds_total{{status=\"completed\"}} {}\n",
stats.completed_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"failed\"}} {}\n",
stats.failed_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"running\"}} {}\n",
stats.running_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"pending\"}} {}\n",
stats.pending_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"all\"}} {}\n",
stats.total_builds.unwrap_or(0)
));
// Build counts by status
output.push_str("# HELP fc_builds_total Total number of builds by status\n");
output.push_str("# TYPE fc_builds_total gauge\n");
output.push_str(&format!(
"fc_builds_total{{status=\"completed\"}} {}\n",
stats.completed_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"failed\"}} {}\n",
stats.failed_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"running\"}} {}\n",
stats.running_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"pending\"}} {}\n",
stats.pending_builds.unwrap_or(0)
));
output.push_str(&format!(
"fc_builds_total{{status=\"all\"}} {}\n",
stats.total_builds.unwrap_or(0)
));
// Build duration stats
output.push_str("\n# HELP fc_builds_avg_duration_seconds Average build duration in seconds\n");
output.push_str("# TYPE fc_builds_avg_duration_seconds gauge\n");
output.push_str(&format!(
"fc_builds_avg_duration_seconds {:.2}\n",
stats.avg_duration_seconds.unwrap_or(0.0)
));
// Build duration stats
output.push_str(
"\n# HELP fc_builds_avg_duration_seconds Average build duration in \
seconds\n",
);
output.push_str("# TYPE fc_builds_avg_duration_seconds gauge\n");
output.push_str(&format!(
"fc_builds_avg_duration_seconds {:.2}\n",
stats.avg_duration_seconds.unwrap_or(0.0)
));
output.push_str("\n# HELP fc_builds_duration_seconds Build duration percentiles\n");
output.push_str("# TYPE fc_builds_duration_seconds gauge\n");
if let Some(p50) = duration_p50 {
output.push_str(&format!(
"fc_builds_duration_seconds{{quantile=\"0.5\"}} {p50:.2}\n"
));
output.push_str(
"\n# HELP fc_builds_duration_seconds Build duration percentiles\n",
);
output.push_str("# TYPE fc_builds_duration_seconds gauge\n");
if let Some(p50) = duration_p50 {
output.push_str(&format!(
"fc_builds_duration_seconds{{quantile=\"0.5\"}} {p50:.2}\n"
));
}
if let Some(p95) = duration_p95 {
output.push_str(&format!(
"fc_builds_duration_seconds{{quantile=\"0.95\"}} {p95:.2}\n"
));
}
if let Some(p99) = duration_p99 {
output.push_str(&format!(
"fc_builds_duration_seconds{{quantile=\"0.99\"}} {p99:.2}\n"
));
}
// Evaluations
output
.push_str("\n# HELP fc_evaluations_total Total number of evaluations\n");
output.push_str("# TYPE fc_evaluations_total gauge\n");
output.push_str(&format!("fc_evaluations_total {}\n", eval_count));
output.push_str("\n# HELP fc_evaluations_by_status Evaluations by status\n");
output.push_str("# TYPE fc_evaluations_by_status gauge\n");
for (status, count) in &eval_by_status {
output.push_str(&format!(
"fc_evaluations_by_status{{status=\"{status}\"}} {count}\n"
));
}
// Queue depth (pending builds)
output
.push_str("\n# HELP fc_queue_depth Number of pending builds in queue\n");
output.push_str("# TYPE fc_queue_depth gauge\n");
output.push_str(&format!(
"fc_queue_depth {}\n",
stats.pending_builds.unwrap_or(0)
));
// Infrastructure
output.push_str("\n# HELP fc_projects_total Total number of projects\n");
output.push_str("# TYPE fc_projects_total gauge\n");
output.push_str(&format!("fc_projects_total {project_count}\n"));
output.push_str("\n# HELP fc_channels_total Total number of channels\n");
output.push_str("# TYPE fc_channels_total gauge\n");
output.push_str(&format!("fc_channels_total {channel_count}\n"));
output
.push_str("\n# HELP fc_remote_builders_active Active remote builders\n");
output.push_str("# TYPE fc_remote_builders_active gauge\n");
output.push_str(&format!("fc_remote_builders_active {builder_count}\n"));
// Per-project build counts
if !per_project.is_empty() {
output.push_str(
"\n# HELP fc_project_builds_completed Completed builds per project\n",
);
output.push_str("# TYPE fc_project_builds_completed gauge\n");
for (name, completed, _) in &per_project {
output.push_str(&format!(
"fc_project_builds_completed{{project=\"{name}\"}} {completed}\n"
));
}
if let Some(p95) = duration_p95 {
output.push_str(&format!(
"fc_builds_duration_seconds{{quantile=\"0.95\"}} {p95:.2}\n"
));
}
if let Some(p99) = duration_p99 {
output.push_str(&format!(
"fc_builds_duration_seconds{{quantile=\"0.99\"}} {p99:.2}\n"
));
output.push_str(
"\n# HELP fc_project_builds_failed Failed builds per project\n",
);
output.push_str("# TYPE fc_project_builds_failed gauge\n");
for (name, _, failed) in &per_project {
output.push_str(&format!(
"fc_project_builds_failed{{project=\"{name}\"}} {failed}\n"
));
}
}
// Evaluations
output.push_str("\n# HELP fc_evaluations_total Total number of evaluations\n");
output.push_str("# TYPE fc_evaluations_total gauge\n");
output.push_str(&format!("fc_evaluations_total {}\n", eval_count));
output.push_str("\n# HELP fc_evaluations_by_status Evaluations by status\n");
output.push_str("# TYPE fc_evaluations_by_status gauge\n");
for (status, count) in &eval_by_status {
output.push_str(&format!(
"fc_evaluations_by_status{{status=\"{status}\"}} {count}\n"
));
}
// Queue depth (pending builds)
output.push_str("\n# HELP fc_queue_depth Number of pending builds in queue\n");
output.push_str("# TYPE fc_queue_depth gauge\n");
output.push_str(&format!(
"fc_queue_depth {}\n",
stats.pending_builds.unwrap_or(0)
));
// Infrastructure
output.push_str("\n# HELP fc_projects_total Total number of projects\n");
output.push_str("# TYPE fc_projects_total gauge\n");
output.push_str(&format!("fc_projects_total {project_count}\n"));
output.push_str("\n# HELP fc_channels_total Total number of channels\n");
output.push_str("# TYPE fc_channels_total gauge\n");
output.push_str(&format!("fc_channels_total {channel_count}\n"));
output.push_str("\n# HELP fc_remote_builders_active Active remote builders\n");
output.push_str("# TYPE fc_remote_builders_active gauge\n");
output.push_str(&format!("fc_remote_builders_active {builder_count}\n"));
// Per-project build counts
if !per_project.is_empty() {
output.push_str("\n# HELP fc_project_builds_completed Completed builds per project\n");
output.push_str("# TYPE fc_project_builds_completed gauge\n");
for (name, completed, _) in &per_project {
output.push_str(&format!(
"fc_project_builds_completed{{project=\"{name}\"}} {completed}\n"
));
}
output.push_str("\n# HELP fc_project_builds_failed Failed builds per project\n");
output.push_str("# TYPE fc_project_builds_failed gauge\n");
for (name, _, failed) in &per_project {
output.push_str(&format!(
"fc_project_builds_failed{{project=\"{name}\"}} {failed}\n"
));
}
}
(
StatusCode::OK,
[("content-type", "text/plain; version=0.0.4; charset=utf-8")],
output,
)
.into_response()
(
StatusCode::OK,
[("content-type", "text/plain; version=0.0.4; charset=utf-8")],
output,
)
.into_response()
}
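For reference, the exposition assembled above renders roughly like this (values invented):

// # HELP fc_builds_total Total number of builds by status
// # TYPE fc_builds_total gauge
// fc_builds_total{status="completed"} 42
// fc_builds_total{status="failed"} 3
// fc_builds_avg_duration_seconds 96.40
// fc_builds_duration_seconds{quantile="0.95"} 128.50
// fc_queue_depth 7
// fc_project_builds_completed{project="demo"} 40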
pub fn router() -> Router<AppState> {
Router::new().route("/metrics", get(prometheus_metrics))
Router::new().route("/metrics", get(prometheus_metrics))
}


@@ -14,111 +14,115 @@ pub mod projects;
pub mod search;
pub mod webhooks;
use std::net::IpAddr;
use std::sync::Arc;
use std::time::Instant;
use std::{net::IpAddr, sync::Arc, time::Instant};
use axum::Router;
use axum::extract::ConnectInfo;
use axum::http::{HeaderValue, Request, StatusCode};
use axum::middleware::{self, Next};
use axum::response::{IntoResponse, Response};
use axum::routing::get;
use axum::{
Router,
body::Body,
extract::ConnectInfo,
http::{HeaderValue, Request, StatusCode, header},
middleware::{self, Next},
response::{IntoResponse, Response},
routing::get,
};
use dashmap::DashMap;
use fc_common::config::ServerConfig;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tower_http::limit::RequestBodyLimitLayer;
use tower_http::set_header::SetResponseHeaderLayer;
use tower_http::trace::TraceLayer;
use tower_http::{
cors::{AllowOrigin, CorsLayer},
limit::RequestBodyLimitLayer,
set_header::SetResponseHeaderLayer,
trace::TraceLayer,
};
use axum::body::Body;
use axum::http::header;
use crate::auth_middleware::{extract_session, require_api_key};
use crate::state::AppState;
use crate::{
auth_middleware::{extract_session, require_api_key},
state::AppState,
};
static STYLE_CSS: &str = include_str!("../../static/style.css");
struct RateLimitState {
requests: DashMap<IpAddr, Vec<Instant>>,
_rps: u64,
burst: u32,
last_cleanup: std::sync::atomic::AtomicU64,
requests: DashMap<IpAddr, Vec<Instant>>,
_rps: u64,
burst: u32,
last_cleanup: std::sync::atomic::AtomicU64,
}
async fn rate_limit_middleware(
ConnectInfo(addr): ConnectInfo<std::net::SocketAddr>,
request: Request<axum::body::Body>,
next: Next,
ConnectInfo(addr): ConnectInfo<std::net::SocketAddr>,
request: Request<axum::body::Body>,
next: Next,
) -> Response {
let state = request.extensions().get::<Arc<RateLimitState>>().cloned();
let state = request.extensions().get::<Arc<RateLimitState>>().cloned();
if let Some(rl) = state {
let ip = addr.ip();
let now = Instant::now();
let window = std::time::Duration::from_secs(1);
if let Some(rl) = state {
let ip = addr.ip();
let now = Instant::now();
let window = std::time::Duration::from_secs(1);
// Periodic cleanup of stale entries (every 60 seconds)
let now_secs = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let last = rl.last_cleanup.load(std::sync::atomic::Ordering::Relaxed);
if now_secs - last > 60
&& rl
.last_cleanup
.compare_exchange(
last,
now_secs,
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::Relaxed,
)
.is_ok()
{
rl.requests.retain(|_, v| {
v.retain(|t| now.duration_since(*t) < std::time::Duration::from_secs(10));
!v.is_empty()
});
}
let mut entry = rl.requests.entry(ip).or_default();
entry.retain(|t| now.duration_since(*t) < window);
if entry.len() >= rl.burst as usize {
return StatusCode::TOO_MANY_REQUESTS.into_response();
}
entry.push(now);
drop(entry);
// Periodic cleanup of stale entries (every 60 seconds)
let now_secs = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let last = rl.last_cleanup.load(std::sync::atomic::Ordering::Relaxed);
if now_secs - last > 60
&& rl
.last_cleanup
.compare_exchange(
last,
now_secs,
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::Relaxed,
)
.is_ok()
{
rl.requests.retain(|_, v| {
v.retain(|t| {
now.duration_since(*t) < std::time::Duration::from_secs(10)
});
!v.is_empty()
});
}
next.run(request).await
let mut entry = rl.requests.entry(ip).or_default();
entry.retain(|t| now.duration_since(*t) < window);
if entry.len() >= rl.burst as usize {
return StatusCode::TOO_MANY_REQUESTS.into_response();
}
entry.push(now);
drop(entry);
}
next.run(request).await
}
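Net effect of the middleware above: with burst = 5, at most five requests per source IP are admitted within any trailing one-second window, and the sixth receives 429; `_rps` is currently unused, so `burst` alone defines the limit. The 60-second sweep only bounds memory for IPs that have gone quiet.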
async fn serve_style_css() -> Response {
Response::builder()
.header(header::CONTENT_TYPE, "text/css")
.header(header::CACHE_CONTROL, "public, max-age=3600")
.body(Body::from(STYLE_CSS))
.unwrap()
.into_response()
Response::builder()
.header(header::CONTENT_TYPE, "text/css")
.header(header::CACHE_CONTROL, "public, max-age=3600")
.body(Body::from(STYLE_CSS))
.unwrap()
.into_response()
}
pub fn router(state: AppState, config: &ServerConfig) -> Router {
let cors_layer = if config.cors_permissive {
CorsLayer::permissive()
} else if config.allowed_origins.is_empty() {
CorsLayer::new()
} else {
let origins: Vec<HeaderValue> = config
.allowed_origins
.iter()
.filter_map(|o| o.parse().ok())
.collect();
CorsLayer::new().allow_origin(AllowOrigin::list(origins))
};
let cors_layer = if config.cors_permissive {
CorsLayer::permissive()
} else if config.allowed_origins.is_empty() {
CorsLayer::new()
} else {
let origins: Vec<HeaderValue> = config
.allowed_origins
.iter()
.filter_map(|o| o.parse().ok())
.collect();
CorsLayer::new().allow_origin(AllowOrigin::list(origins))
};
let mut app = Router::new()
let mut app = Router::new()
// Static assets
.route("/static/style.css", get(serve_style_css))
// Dashboard routes with session extraction middleware
@@ -169,18 +173,20 @@ pub fn router(state: AppState, config: &ServerConfig) -> Router {
HeaderValue::from_static("strict-origin-when-cross-origin"),
));
// Add rate limiting if configured
if let (Some(rps), Some(burst)) = (config.rate_limit_rps, config.rate_limit_burst) {
let rl_state = Arc::new(RateLimitState {
requests: DashMap::new(),
_rps: rps,
burst,
last_cleanup: std::sync::atomic::AtomicU64::new(0),
});
app = app
.layer(axum::Extension(rl_state))
.layer(middleware::from_fn(rate_limit_middleware));
}
// Add rate limiting if configured
if let (Some(rps), Some(burst)) =
(config.rate_limit_rps, config.rate_limit_burst)
{
let rl_state = Arc::new(RateLimitState {
requests: DashMap::new(),
_rps: rps,
burst,
last_cleanup: std::sync::atomic::AtomicU64::new(0),
});
app = app
.layer(axum::Extension(rl_state))
.layer(middleware::from_fn(rate_limit_middleware));
}
app.with_state(state)
app.with_state(state)
}


@@ -1,259 +1,270 @@
use axum::{
Json, Router,
extract::{Path, Query, State},
http::Extensions,
routing::{get, post},
Json,
Router,
extract::{Path, Query, State},
http::Extensions,
routing::{get, post},
};
use fc_common::nix_probe;
use fc_common::{
CreateJobset, CreateProject, Jobset, PaginatedResponse, PaginationParams, Project,
UpdateProject, Validate,
CreateJobset,
CreateProject,
Jobset,
PaginatedResponse,
PaginationParams,
Project,
UpdateProject,
Validate,
nix_probe,
};
use serde::Deserialize;
use uuid::Uuid;
use crate::auth_middleware::{RequireAdmin, RequireRoles};
use crate::error::ApiError;
use crate::state::AppState;
use crate::{
auth_middleware::{RequireAdmin, RequireRoles},
error::ApiError,
state::AppState,
};
async fn list_projects(
State(state): State<AppState>,
Query(pagination): Query<PaginationParams>,
State(state): State<AppState>,
Query(pagination): Query<PaginationParams>,
) -> Result<Json<PaginatedResponse<Project>>, ApiError> {
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::projects::list(&state.pool, limit, offset)
.await
.map_err(ApiError)?;
let total = fc_common::repo::projects::count(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::projects::list(&state.pool, limit, offset)
.await
.map_err(ApiError)?;
let total = fc_common::repo::projects::count(&state.pool)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
}
async fn create_project(
extensions: Extensions,
State(state): State<AppState>,
Json(input): Json<CreateProject>,
extensions: Extensions,
State(state): State<AppState>,
Json(input): Json<CreateProject>,
) -> Result<Json<Project>, ApiError> {
RequireRoles::check(&extensions, &["create-projects"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let project = fc_common::repo::projects::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(project))
RequireRoles::check(&extensions, &["create-projects"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let project = fc_common::repo::projects::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(project))
}
async fn get_project(
State(state): State<AppState>,
Path(id): Path<Uuid>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<Project>, ApiError> {
let project = fc_common::repo::projects::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(project))
let project = fc_common::repo::projects::get(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(project))
}
async fn update_project(
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(input): Json<UpdateProject>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(input): Json<UpdateProject>,
) -> Result<Json<Project>, ApiError> {
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let project = fc_common::repo::projects::update(&state.pool, id, input)
.await
.map_err(ApiError)?;
Ok(Json(project))
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let project = fc_common::repo::projects::update(&state.pool, id, input)
.await
.map_err(ApiError)?;
Ok(Json(project))
}
async fn delete_project(
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
_auth: RequireAdmin,
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
fc_common::repo::projects::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
fc_common::repo::projects::delete(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
}
async fn list_project_jobsets(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Query(pagination): Query<PaginationParams>,
State(state): State<AppState>,
Path(id): Path<Uuid>,
Query(pagination): Query<PaginationParams>,
) -> Result<Json<PaginatedResponse<Jobset>>, ApiError> {
let limit = pagination.limit();
let offset = pagination.offset();
let items = fc_common::repo::jobsets::list_for_project(&state.pool, id, limit, offset)
.await
.map_err(ApiError)?;
let total = fc_common::repo::jobsets::count_for_project(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
let limit = pagination.limit();
let offset = pagination.offset();
let items =
fc_common::repo::jobsets::list_for_project(&state.pool, id, limit, offset)
.await
.map_err(ApiError)?;
let total = fc_common::repo::jobsets::count_for_project(&state.pool, id)
.await
.map_err(ApiError)?;
Ok(Json(PaginatedResponse {
items,
total,
limit,
offset,
}))
}
#[derive(Debug, Deserialize)]
struct CreateJobsetBody {
name: String,
nix_expression: String,
enabled: Option<bool>,
flake_mode: Option<bool>,
check_interval: Option<i32>,
name: String,
nix_expression: String,
enabled: Option<bool>,
flake_mode: Option<bool>,
check_interval: Option<i32>,
}
async fn create_project_jobset(
extensions: Extensions,
State(state): State<AppState>,
Path(project_id): Path<Uuid>,
Json(body): Json<CreateJobsetBody>,
extensions: Extensions,
State(state): State<AppState>,
Path(project_id): Path<Uuid>,
Json(body): Json<CreateJobsetBody>,
) -> Result<Json<Jobset>, ApiError> {
RequireRoles::check(&extensions, &["create-projects"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
let input = CreateJobset {
project_id,
name: body.name,
nix_expression: body.nix_expression,
enabled: body.enabled,
flake_mode: body.flake_mode,
check_interval: body.check_interval,
branch: None,
scheduling_shares: None,
};
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let jobset = fc_common::repo::jobsets::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(jobset))
RequireRoles::check(&extensions, &["create-projects"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
let input = CreateJobset {
project_id,
name: body.name,
nix_expression: body.nix_expression,
enabled: body.enabled,
flake_mode: body.flake_mode,
check_interval: body.check_interval,
branch: None,
scheduling_shares: None,
};
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let jobset = fc_common::repo::jobsets::create(&state.pool, input)
.await
.map_err(ApiError)?;
Ok(Json(jobset))
}
#[derive(Debug, Deserialize)]
struct ProbeRequest {
repository_url: String,
revision: Option<String>,
repository_url: String,
revision: Option<String>,
}
async fn probe_repository(
_extensions: Extensions,
Json(body): Json<ProbeRequest>,
_extensions: Extensions,
Json(body): Json<ProbeRequest>,
) -> Result<Json<nix_probe::FlakeProbeResult>, ApiError> {
let result = nix_probe::probe_flake(&body.repository_url, body.revision.as_deref())
.await
.map_err(ApiError)?;
Ok(Json(result))
let result =
nix_probe::probe_flake(&body.repository_url, body.revision.as_deref())
.await
.map_err(ApiError)?;
Ok(Json(result))
}
#[derive(Debug, Deserialize)]
struct SetupJobsetInput {
name: String,
nix_expression: String,
#[allow(dead_code)]
description: Option<String>,
name: String,
nix_expression: String,
#[allow(dead_code)]
description: Option<String>,
}
#[derive(Debug, Deserialize)]
struct SetupProjectRequest {
repository_url: String,
name: String,
description: Option<String>,
jobsets: Vec<SetupJobsetInput>,
repository_url: String,
name: String,
description: Option<String>,
jobsets: Vec<SetupJobsetInput>,
}
#[derive(serde::Serialize)]
struct SetupProjectResponse {
project: Project,
jobsets: Vec<Jobset>,
project: Project,
jobsets: Vec<Jobset>,
}
async fn setup_project(
extensions: Extensions,
State(state): State<AppState>,
Json(body): Json<SetupProjectRequest>,
extensions: Extensions,
State(state): State<AppState>,
Json(body): Json<SetupProjectRequest>,
) -> Result<Json<SetupProjectResponse>, ApiError> {
RequireRoles::check(&extensions, &["create-projects"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
RequireRoles::check(&extensions, &["create-projects"]).map_err(|s| {
ApiError(if s == axum::http::StatusCode::FORBIDDEN {
fc_common::CiError::Forbidden("Insufficient permissions".to_string())
} else {
fc_common::CiError::Unauthorized("Authentication required".to_string())
})
})?;
let create_project = CreateProject {
name: body.name,
repository_url: body.repository_url,
description: body.description,
};
create_project
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let project = fc_common::repo::projects::create(&state.pool, create_project)
.await
.map_err(ApiError)?;
let mut jobsets = Vec::new();
for js_input in body.jobsets {
let input = CreateJobset {
project_id: project.id,
name: js_input.name,
nix_expression: js_input.nix_expression,
enabled: Some(true),
flake_mode: Some(true),
check_interval: None,
branch: None,
scheduling_shares: None,
};
input
.validate()
.map_err(|msg| ApiError(fc_common::CiError::Validation(msg)))?;
let jobset = fc_common::repo::jobsets::create(&state.pool, input)
.await
.map_err(ApiError)?;
jobsets.push(jobset);
}
Ok(Json(SetupProjectResponse { project, jobsets }))
}
pub fn router() -> Router<AppState> {
Router::new()
.route("/projects", get(list_projects).post(create_project))
.route("/projects/probe", post(probe_repository))
.route("/projects/setup", post(setup_project))
.route(
"/projects/{id}",
get(get_project).put(update_project).delete(delete_project),
)
.route(
"/projects/{id}/jobsets",
get(list_project_jobsets).post(create_project_jobset),
)
}


@ -1,58 +1,60 @@
use axum::{
Json,
Router,
extract::{Query, State},
routing::get,
};
use fc_common::models::{Build, Project};
use serde::{Deserialize, Serialize};
use crate::{error::ApiError, state::AppState};
#[derive(Debug, Deserialize)]
struct SearchParams {
q: String,
}
#[derive(Debug, Serialize)]
struct SearchResults {
projects: Vec<Project>,
builds: Vec<Build>,
}
async fn search(
State(state): State<AppState>,
Query(params): Query<SearchParams>,
) -> Result<Json<SearchResults>, ApiError> {
let query = params.q.trim();
if query.is_empty() || query.len() > 256 {
return Ok(Json(SearchResults {
projects: vec![],
builds: vec![],
}));
}
let pattern = format!("%{query}%");
let pattern = format!("%{query}%");
let projects = sqlx::query_as::<_, Project>(
"SELECT * FROM projects WHERE name ILIKE $1 OR description ILIKE $1 ORDER \
BY name LIMIT 20",
)
.bind(&pattern)
.fetch_all(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let builds = sqlx::query_as::<_, Build>(
"SELECT * FROM builds WHERE job_name ILIKE $1 OR drv_path ILIKE $1 ORDER \
BY created_at DESC LIMIT 20",
)
.bind(&pattern)
.fetch_all(&state.pool)
.await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
Ok(Json(SearchResults { projects, builds }))
}
pub fn router() -> Router<AppState> {
Router::new().route("/search", get(search))
Router::new().route("/search", get(search))
}
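For context, a hedged sketch of exercising this search route from a client, using the reqwest crate already in the workspace dependencies; the base URL and the /api/v1 prefix are assumptions, not taken from this diff.
// Illustrative only: query the search endpoint and pretty-print the JSON.
async fn search_demo() -> anyhow::Result<()> {
  let results: serde_json::Value = reqwest::Client::new()
    .get("http://127.0.0.1:3000/api/v1/search") // assumed base URL + prefix
    .query(&[("q", "hello")])
    .send()
    .await?
    .json()
    .await?;
  println!("{results:#}");
  Ok(())
}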


@ -1,302 +1,313 @@
use axum::{
Json,
Router,
body::Bytes,
extract::{Path, State},
http::{HeaderMap, StatusCode},
routing::post,
};
use fc_common::{models::CreateEvaluation, repo};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::{error::ApiError, state::AppState};
#[derive(Debug, Serialize)]
struct WebhookResponse {
accepted: bool,
message: String,
}
#[allow(dead_code)]
#[derive(Debug, Deserialize)]
struct GithubPushPayload {
#[serde(alias = "ref")]
git_ref: Option<String>,
after: Option<String>,
repository: Option<GithubRepo>,
#[serde(alias = "ref")]
git_ref: Option<String>,
after: Option<String>,
repository: Option<GithubRepo>,
}
#[allow(dead_code)]
#[derive(Debug, Deserialize)]
struct GithubRepo {
clone_url: Option<String>,
html_url: Option<String>,
}
#[allow(dead_code)]
#[derive(Debug, Deserialize)]
struct GiteaPushPayload {
#[serde(alias = "ref")]
git_ref: Option<String>,
after: Option<String>,
repository: Option<GiteaRepo>,
}
#[allow(dead_code)]
#[derive(Debug, Deserialize)]
struct GiteaRepo {
clone_url: Option<String>,
html_url: Option<String>,
}
/// Verify HMAC-SHA256 webhook signature.
/// The `secret` parameter is the raw webhook secret stored in DB.
fn verify_signature(secret: &str, body: &[u8], signature: &str) -> bool {
use hmac::{Hmac, Mac};
use sha2::Sha256;
let Ok(mut mac) = Hmac::<Sha256>::new_from_slice(secret.as_bytes()) else {
return false;
};
mac.update(body);
// Parse the hex signature (strip "sha256=" prefix if present)
let hex_sig = signature
.strip_prefix("sha256=")
.or_else(|| signature.strip_prefix("sha1="))
.unwrap_or(signature);
let Ok(sig_bytes) = hex::decode(hex_sig) else {
return false;
};
mac.verify_slice(&sig_bytes).is_ok()
}
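For reference, a minimal sketch of the sender side: how a forge (or a test) could produce the sha256=-prefixed header that verify_signature checks, using the same hmac/sha2/hex crates from the workspace. The helper name is illustrative, not part of this codebase.
/// Compute an `x-hub-signature-256`-style value for a payload (illustrative).
fn sign_payload(secret: &str, body: &[u8]) -> String {
  use hmac::{Hmac, Mac};
  use sha2::Sha256;
  let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes())
    .expect("HMAC accepts keys of any length");
  mac.update(body);
  format!("sha256={}", hex::encode(mac.finalize().into_bytes()))
}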
async fn handle_github_push(
State(state): State<AppState>,
Path(project_id): Path<Uuid>,
headers: HeaderMap,
body: Bytes,
) -> Result<(StatusCode, Json<WebhookResponse>), ApiError> {
// Check webhook config exists
let webhook_config = repo::webhook_configs::get_by_project_and_forge(
&state.pool,
project_id,
"github",
)
.await
.map_err(ApiError)?;
let webhook_config = match webhook_config {
Some(c) => c,
None => {
return Ok((
StatusCode::NOT_FOUND,
Json(WebhookResponse {
accepted: false,
message: "No GitHub webhook configured for this project".to_string(),
}),
));
},
};
// Verify signature if secret is configured
if let Some(ref secret_hash) = webhook_config.secret_hash {
let signature = headers
.get("x-hub-signature-256")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
if !verify_signature(secret_hash, &body, signature) {
return Ok((
StatusCode::UNAUTHORIZED,
Json(WebhookResponse {
accepted: false,
message: "Invalid webhook signature".to_string(),
}),
));
}
}
// Parse payload
let payload: GithubPushPayload =
serde_json::from_slice(&body).map_err(|e| {
ApiError(fc_common::CiError::Validation(format!(
"Invalid payload: {e}"
)))
})?;
let commit = payload.after.unwrap_or_default();
if commit.is_empty() || commit == "0000000000000000000000000000000000000000" {
return Ok((
StatusCode::OK,
Json(WebhookResponse {
accepted: true,
message: "Branch deletion event, skipping".to_string(),
}),
));
}
// Find matching jobsets for this project and trigger evaluations
let jobsets =
repo::jobsets::list_for_project(&state.pool, project_id, 1000, 0)
.await
.map_err(ApiError)?;
let mut triggered = 0;
for jobset in &jobsets {
if !jobset.enabled {
continue;
}
match repo::evaluations::create(&state.pool, CreateEvaluation {
jobset_id: jobset.id,
commit_hash: commit.clone(),
})
.await
{
Ok(_) => triggered += 1,
Err(fc_common::CiError::Conflict(_)) => {}, // already exists
Err(e) => tracing::warn!("Failed to create evaluation: {e}"),
}
}
Ok((
StatusCode::OK,
Json(WebhookResponse {
accepted: true,
message: format!(
"Triggered {triggered} evaluations for commit {commit}"
),
}),
))
}
async fn handle_gitea_push(
State(state): State<AppState>,
Path(project_id): Path<Uuid>,
headers: HeaderMap,
body: Bytes,
) -> Result<(StatusCode, Json<WebhookResponse>), ApiError> {
// Check webhook config exists
let forge_type = if headers.get("x-forgejo-event").is_some() {
"forgejo"
} else {
"gitea"
};
let webhook_config = repo::webhook_configs::get_by_project_and_forge(
&state.pool,
project_id,
forge_type,
)
.await
.map_err(ApiError)?;
// Fall back to the other type if not found
let webhook_config = match webhook_config {
Some(c) => c,
None => {
let alt = if forge_type == "gitea" {
"forgejo"
} else {
"gitea"
};
match repo::webhook_configs::get_by_project_and_forge(
&state.pool,
project_id,
alt,
)
.await
.map_err(ApiError)?
{
Some(c) => c,
None => {
return Ok((
StatusCode::NOT_FOUND,
Json(WebhookResponse {
accepted: false,
message: "No Gitea/Forgejo webhook configured for this project"
.to_string(),
}),
));
},
}
},
};
// Verify signature if configured
if let Some(ref secret_hash) = webhook_config.secret_hash {
let signature = headers
.get("x-gitea-signature")
.or_else(|| headers.get("x-forgejo-signature"))
.and_then(|v| v.to_str().ok())
.unwrap_or("");
if !verify_signature(secret_hash, &body, signature) {
return Ok((
StatusCode::UNAUTHORIZED,
Json(WebhookResponse {
accepted: false,
message: "Invalid webhook signature".to_string(),
}),
));
}
}
let payload: GiteaPushPayload =
serde_json::from_slice(&body).map_err(|e| {
ApiError(fc_common::CiError::Validation(format!(
"Invalid payload: {e}"
)))
})?;
let commit = payload.after.unwrap_or_default();
if commit.is_empty() || commit == "0000000000000000000000000000000000000000" {
return Ok((
StatusCode::OK,
Json(WebhookResponse {
accepted: true,
message: "Branch deletion event, skipping".to_string(),
}),
));
}
let jobsets =
repo::jobsets::list_for_project(&state.pool, project_id, 1000, 0)
.await
.map_err(ApiError)?;
let mut triggered = 0;
for jobset in &jobsets {
if !jobset.enabled {
continue;
}
match repo::evaluations::create(&state.pool, CreateEvaluation {
jobset_id: jobset.id,
commit_hash: commit.clone(),
})
.await
{
Ok(_) => triggered += 1,
Err(fc_common::CiError::Conflict(_)) => {},
Err(e) => tracing::warn!("Failed to create evaluation: {e}"),
}
}
Ok((
StatusCode::OK,
Json(WebhookResponse {
accepted: true,
message: format!(
"Triggered {triggered} evaluations for commit {commit}"
),
}),
))
}
pub fn router() -> Router<AppState> {
Router::new()
.route(
"/api/v1/webhooks/{project_id}/github",
post(handle_github_push),
)
.route(
"/api/v1/webhooks/{project_id}/gitea",
post(handle_gitea_push),
)
.route(
"/api/v1/webhooks/{project_id}/forgejo",
post(handle_gitea_push),
)
}


@ -1,19 +1,17 @@
use std::{sync::Arc, time::Instant};
use dashmap::DashMap;
use fc_common::{config::Config, models::ApiKey};
use sqlx::PgPool;
pub struct SessionData {
pub api_key: ApiKey,
pub created_at: Instant,
}
#[derive(Clone)]
pub struct AppState {
pub pool: PgPool,
pub config: Config,
pub sessions: Arc<DashMap<String, SessionData>>,
}
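A minimal sketch of how this sessions map can be used, assuming ApiKey derives Clone and a one-hour TTL; neither assumption comes from this diff.
// Illustrative only: look up a cached session, evicting stale entries.
fn session_lookup(state: &AppState, token: &str) -> Option<ApiKey> {
  if let Some(entry) = state.sessions.get(token) {
    if entry.created_at.elapsed().as_secs() < 3600 {
      return Some(entry.api_key.clone()); // assumes ApiKey: Clone
    }
  } // the DashMap guard drops here, so the removal below cannot deadlock
  state.sessions.remove(token);
  None
}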

File diff suppressed because it is too large


@ -4,333 +4,331 @@
//!
//! Nix-dependent steps are skipped if nix is not available.
use axum::{
body::Body,
http::{Request, StatusCode},
};
use fc_common::models::*;
use tower::ServiceExt;
async fn get_pool() -> Option<sqlx::PgPool> {
let url = match std::env::var("TEST_DATABASE_URL") {
Ok(url) => url,
Err(_) => {
println!("Skipping E2E test: TEST_DATABASE_URL not set");
return None;
},
};
let pool = sqlx::postgres::PgPoolOptions::new()
.max_connections(5)
.connect(&url)
.await
.ok()?;
sqlx::migrate!("../common/migrations")
.run(&pool)
.await
.ok()?;
Some(pool)
}
#[tokio::test]
async fn test_e2e_project_eval_build_flow() {
let pool = match get_pool().await {
Some(p) => p,
None => return,
};
// 1. Create a project
let project_name = format!("e2e-test-{}", uuid::Uuid::new_v4());
let project = fc_common::repo::projects::create(&pool, CreateProject {
name: project_name.clone(),
description: Some("E2E test project".to_string()),
repository_url: "https://github.com/test/e2e".to_string(),
})
.await
.expect("create project");
assert_eq!(project.name, project_name);
// 2. Create a jobset
let jobset = fc_common::repo::jobsets::create(&pool, CreateJobset {
project_id: project.id,
name: "default".to_string(),
nix_expression: "packages".to_string(),
enabled: Some(true),
flake_mode: Some(true),
check_interval: Some(300),
branch: None,
scheduling_shares: None,
})
.await
.expect("create jobset");
assert_eq!(jobset.project_id, project.id);
assert!(jobset.enabled);
// 3. Verify active jobsets include our new one
let active = fc_common::repo::jobsets::list_active(&pool)
.await
.expect("list active");
assert!(
active.iter().any(|j| j.id == jobset.id),
"new jobset should be in active list"
);
// 4. Create an evaluation
let eval = fc_common::repo::evaluations::create(&pool, CreateEvaluation {
jobset_id: jobset.id,
commit_hash: "e2e0000000000000000000000000000000000000".to_string(),
})
.await
.expect("create evaluation");
assert_eq!(eval.jobset_id, jobset.id);
assert_eq!(eval.status, EvaluationStatus::Pending);
// 5. Mark evaluation as running
fc_common::repo::evaluations::update_status(
&pool,
eval.id,
EvaluationStatus::Running,
None,
)
.await
.expect("update eval status");
// 6. Create builds as if nix evaluation found jobs
let build1 = fc_common::repo::builds::create(&pool, CreateBuild {
evaluation_id: eval.id,
job_name: "hello".to_string(),
drv_path: "/nix/store/e2e000-hello.drv".to_string(),
system: Some("x86_64-linux".to_string()),
outputs: Some(serde_json::json!({"out": "/nix/store/e2e000-hello"})),
is_aggregate: Some(false),
constituents: None,
})
.await
.expect("create build 1");
let build2 = fc_common::repo::builds::create(&pool, CreateBuild {
evaluation_id: eval.id,
job_name: "world".to_string(),
drv_path: "/nix/store/e2e000-world.drv".to_string(),
system: Some("x86_64-linux".to_string()),
outputs: Some(serde_json::json!({"out": "/nix/store/e2e000-world"})),
is_aggregate: Some(false),
constituents: None,
})
.await
.expect("create build 2");
assert_eq!(build1.status, BuildStatus::Pending);
assert_eq!(build2.status, BuildStatus::Pending);
// 7. Create build dependency (hello depends on world)
fc_common::repo::build_dependencies::create(&pool, build1.id, build2.id)
.await
.expect("create dependency");
// 8. Verify dependency check: build1 deps NOT complete (world is still
// pending)
let deps_complete =
fc_common::repo::build_dependencies::all_deps_completed(&pool, build1.id)
.await
.expect("check deps");
assert!(!deps_complete, "deps should NOT be complete yet");
// 9. Complete build2 (world)
fc_common::repo::builds::start(&pool, build2.id)
.await
.expect("start build2");
fc_common::repo::builds::complete(
&pool,
build2.id,
BuildStatus::Completed,
None,
Some("/nix/store/e2e000-world"),
None,
)
.await
.expect("complete build2");
// 10. Now build1 deps should be complete
let deps_complete =
fc_common::repo::build_dependencies::all_deps_completed(&pool, build1.id)
.await
.expect("check deps again");
assert!(deps_complete, "deps should be complete after build2 done");
// 11. Complete build1 (hello)
fc_common::repo::builds::start(&pool, build1.id)
.await
.expect("start build1");
let step = fc_common::repo::build_steps::create(&pool, CreateBuildStep {
build_id: build1.id,
step_number: 1,
command: "nix build /nix/store/e2e000-hello.drv".to_string(),
})
.await
.expect("create step");
fc_common::repo::build_steps::complete(
&pool,
step.id,
0,
Some("built!"),
None,
)
.await
.expect("complete step");
fc_common::repo::build_products::create(&pool, CreateBuildProduct {
build_id: build1.id,
name: "out".to_string(),
path: "/nix/store/e2e000-hello".to_string(),
sha256_hash: Some("abcdef1234567890".to_string()),
file_size: Some(12345),
content_type: None,
is_directory: true,
})
.await
.expect("create product");
fc_common::repo::builds::complete(
&pool,
build1.id,
BuildStatus::Completed,
None,
Some("/nix/store/e2e000-hello"),
None,
)
.await
.expect("complete build1");
// 12. Mark evaluation as completed
fc_common::repo::evaluations::update_status(
&pool,
eval.id,
EvaluationStatus::Completed,
None,
)
.await
.expect("complete eval");
// 13. Verify everything is in the expected state
let final_eval = fc_common::repo::evaluations::get(&pool, eval.id)
.await
.expect("get eval");
assert_eq!(final_eval.status, EvaluationStatus::Completed);
let final_build1 = fc_common::repo::builds::get(&pool, build1.id)
.await
.expect("get build1");
assert_eq!(final_build1.status, BuildStatus::Completed);
assert_eq!(
final_build1.build_output_path.as_deref(),
Some("/nix/store/e2e000-hello")
);
let products =
fc_common::repo::build_products::list_for_build(&pool, build1.id)
.await
.expect("list products");
assert_eq!(products.len(), 1);
assert_eq!(products[0].name, "out");
let steps = fc_common::repo::build_steps::list_for_build(&pool, build1.id)
.await
.expect("list steps");
assert_eq!(steps.len(), 1);
assert_eq!(steps[0].exit_code, Some(0));
// 14. Verify build stats reflect our changes
let stats = fc_common::repo::builds::get_stats(&pool)
.await
.expect("get stats");
assert!(stats.completed_builds.unwrap_or(0) >= 2);
// 15. Create a channel and verify it works
let channel = fc_common::repo::channels::create(&pool, CreateChannel {
project_id: project.id,
name: "stable".to_string(),
jobset_id: jobset.id,
})
.await
.expect("create channel");
let channels = fc_common::repo::channels::list_all(&pool)
.await
.expect("list channels");
assert!(channels.iter().any(|c| c.id == channel.id));
// 16. Test the HTTP API layer
let config = fc_common::config::Config::default();
let server_config = config.server.clone();
let state = fc_server::state::AppState {
pool: pool.clone(),
config,
sessions: std::sync::Arc::new(dashmap::DashMap::new()),
};
let app = fc_server::routes::router(state, &server_config);
// GET /health
let resp = app
.clone()
.oneshot(
Request::builder()
.uri("/health")
.body(Body::empty())
.unwrap(),
)
.await
.expect("create project");
.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
// GET /api/v1/projects/{id}
let resp = app
.clone()
.oneshot(
Request::builder()
.uri(format!("/api/v1/projects/{}", project.id))
.body(Body::empty())
.unwrap(),
)
.await
.expect("create jobset");
.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
// GET /api/v1/builds/{id}
let resp = app
.clone()
.oneshot(
Request::builder()
.uri(format!("/api/v1/builds/{}", build1.id))
.body(Body::empty())
.unwrap(),
)
.await
.expect("create evaluation");
.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
// GET / (dashboard)
let resp = app
.clone()
.oneshot(Request::builder().uri("/").body(Body::empty()).unwrap())
.await
.expect("create build 1");
let build2 = fc_common::repo::builds::create(
&pool,
CreateBuild {
evaluation_id: eval.id,
job_name: "world".to_string(),
drv_path: "/nix/store/e2e000-world.drv".to_string(),
system: Some("x86_64-linux".to_string()),
outputs: Some(serde_json::json!({"out": "/nix/store/e2e000-world"})),
is_aggregate: Some(false),
constituents: None,
},
)
.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let body = axum::body::to_bytes(resp.into_body(), usize::MAX)
.await
.expect("create build 2");
.unwrap();
let body_str = String::from_utf8(body.to_vec()).unwrap();
assert!(body_str.contains("Dashboard"));
// Clean up
let _ = fc_common::repo::projects::delete(&pool, project.id).await;
}

fc.toml

@ -2,30 +2,30 @@
# This file contains default configuration for all FC CI components
[database]
url = "postgresql://fc_ci:password@localhost/fc_ci"
max_connections = 20
min_connections = 5
connect_timeout = 30
idle_timeout = 600
max_lifetime = 1800
idle_timeout = 600
max_connections = 20
max_lifetime = 1800
min_connections = 5
url = "postgresql://fc_ci:password@localhost/fc_ci"
[server]
host = "127.0.0.1"
port = 3000
allowed_origins = [ ]
host = "127.0.0.1"
max_body_size = 10485760 # 10MB
port = 3000
request_timeout = 30
max_body_size = 10485760 # 10MB
allowed_origins = []
[evaluator]
allow_ifd = false
git_timeout = 600
nix_timeout = 1800
poll_interval = 60
restrict_eval = true
work_dir = "/tmp/fc-evaluator"
[queue_runner]
build_timeout = 3600
poll_interval = 5
work_dir = "/tmp/fc-queue-runner"
workers = 4
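As a usage note, a hedged sketch of loading this file with the toml crate from the workspace; whether fc_common::config::Config actually derives Deserialize, and where the real loader lives, are assumptions rather than facts from this diff.
use fc_common::config::Config;

// Illustrative only: parse fc.toml into the shared Config type.
fn load_config() -> anyhow::Result<Config> {
  let raw = std::fs::read_to_string("fc.toml")?;
  Ok(toml::from_str(&raw)?) // assumes Config: serde::Deserialize
}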