crates/common: add bootstrap, tracing_init, and nix_probe modules

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ifbc17b000a4fb4a10e05ac9405582a366a6a6964
This commit is contained in:
raf 2026-02-02 01:22:40 +03:00
commit be9caa0b61
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
4 changed files with 581 additions and 0 deletions

View file

@ -0,0 +1,95 @@
//! Declarative bootstrap: upsert projects, jobsets, and API keys from config.
//!
//! Called once on server startup to reconcile declarative configuration
//! with database state. Uses upsert semantics so repeated runs are idempotent.
use sha2::{Digest, Sha256};
use sqlx::PgPool;
use crate::config::DeclarativeConfig;
use crate::error::Result;
use crate::models::{CreateJobset, CreateProject};
use crate::repo;
/// Reconcile declarative configuration with the database.
///
/// Idempotent: repeated invocations with an unchanged config leave the
/// database in the same state, because every write below is an upsert.
/// Projects are written first, then the jobsets nested under each project,
/// and finally the API keys.
pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> {
    // Nothing declared — nothing to reconcile.
    if config.projects.is_empty() && config.api_keys.is_empty() {
        return Ok(());
    }

    let jobset_total: usize = config.projects.iter().map(|p| p.jobsets.len()).sum();
    tracing::info!(
        projects = config.projects.len(),
        jobsets = jobset_total,
        api_keys = config.api_keys.len(),
        "Bootstrapping declarative configuration"
    );

    // Upsert each declared project, then every jobset it owns.
    for wanted in &config.projects {
        let stored = repo::projects::upsert(
            pool,
            CreateProject {
                name: wanted.name.clone(),
                repository_url: wanted.repository_url.clone(),
                description: wanted.description.clone(),
            },
        )
        .await?;
        tracing::info!(
            project = %stored.name,
            id = %stored.id,
            "Upserted declarative project"
        );

        for wanted_jobset in &wanted.jobsets {
            let stored_jobset = repo::jobsets::upsert(
                pool,
                CreateJobset {
                    project_id: stored.id,
                    name: wanted_jobset.name.clone(),
                    nix_expression: wanted_jobset.nix_expression.clone(),
                    enabled: Some(wanted_jobset.enabled),
                    flake_mode: Some(wanted_jobset.flake_mode),
                    check_interval: Some(wanted_jobset.check_interval),
                    // Not configurable declaratively; upsert keeps DB defaults.
                    branch: None,
                    scheduling_shares: None,
                },
            )
            .await?;
            tracing::info!(
                project = %stored.name,
                jobset = %stored_jobset.name,
                "Upserted declarative jobset"
            );
        }
    }

    // API keys are stored as SHA-256 hex digests, never in plaintext.
    for wanted_key in &config.api_keys {
        let digest = hex::encode(Sha256::digest(wanted_key.key.as_bytes()));
        let stored_key =
            repo::api_keys::upsert(pool, &wanted_key.name, &digest, &wanted_key.role).await?;
        tracing::info!(
            name = %stored_key.name,
            role = %stored_key.role,
            "Upserted declarative API key"
        );
    }

    tracing::info!("Declarative bootstrap complete");
    Ok(())
}

View file

@ -11,6 +11,9 @@ pub mod models;
pub mod notifications;
pub mod repo;
pub mod bootstrap;
pub mod nix_probe;
pub mod tracing_init;
pub mod validate;
pub use config::*;
@ -18,4 +21,5 @@ pub use database::*;
pub use error::*;
pub use migrate::*;
pub use models::*;
pub use tracing_init::init_tracing;
pub use validate::Validate;

View file

@ -0,0 +1,431 @@
//! Flake probe: auto-discover what a Nix flake repository provides.
use serde::{Deserialize, Serialize};
use crate::CiError;
use crate::error::Result;
/// Result of probing a flake repository.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlakeProbeResult {
pub is_flake: bool,
pub outputs: Vec<FlakeOutput>,
pub suggested_jobsets: Vec<SuggestedJobset>,
pub metadata: FlakeMetadata,
pub error: Option<String>,
}
/// A discovered flake output attribute.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlakeOutput {
pub path: String,
pub output_type: String,
pub systems: Vec<String>,
}
/// A suggested jobset configuration based on discovered outputs.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SuggestedJobset {
pub name: String,
pub nix_expression: String,
pub description: String,
pub priority: u8,
}
/// Metadata extracted from the flake.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FlakeMetadata {
pub description: Option<String>,
pub url: Option<String>,
}
/// Maximum output size we'll parse from `nix flake show --json` (10 MB).
const MAX_OUTPUT_SIZE: usize = 10 * 1024 * 1024;
/// Convert a repository URL to a nix flake reference.
///
/// GitHub and GitLab HTTPS URLs become their native flake-ref forms
/// (`github:owner/repo`, `gitlab:owner/repo`). Other HTTP(S) URLs are
/// prefixed with `git+` so nix clones them via git instead of trying to
/// unpack an archive. Anything that already looks like a flake ref is
/// returned unchanged.
fn to_flake_ref(url: &str) -> String {
    let cleaned = url.trim().trim_end_matches('/');
    let is_http = cleaned.starts_with("http://") || cleaned.starts_with("https://");

    // A non-HTTP string containing a scheme separator is assumed to already
    // be a flake ref (github:, gitlab:, git+, path:, sourcehut:, ...).
    if cleaned.contains(':') && !is_http {
        return cleaned.to_string();
    }

    // Drop the scheme and any trailing `.git` so we can inspect host/path.
    let host_path = cleaned
        .strip_prefix("https://")
        .or_else(|| cleaned.strip_prefix("http://"))
        .unwrap_or(cleaned)
        .trim_end_matches(".git");

    if let Some(rest) = host_path.strip_prefix("github.com/") {
        format!("github:{rest}")
    } else if let Some(rest) = host_path.strip_prefix("gitlab.com/") {
        format!("gitlab:{rest}")
    } else if is_http {
        // Unknown forge over HTTP(S): force a plain git clone.
        format!("git+{cleaned}")
    } else {
        cleaned.to_string()
    }
}
/// Probe a flake repository to discover its outputs and suggest jobsets.
///
/// Runs `nix flake show --json` against `repo_url` (optionally pinned to
/// `revision`), classifies the top-level outputs, and derives a prioritized
/// list of suggested jobsets.
///
/// # Errors
///
/// - A repository without a `flake.nix` yields `is_flake: false` in the
///   result rather than an error.
/// - Inaccessible repositories and evaluation failures return
///   `CiError::NixEval`.
/// - The probe is bounded by a 60-second timeout (`CiError::Timeout`).
pub async fn probe_flake(repo_url: &str, revision: Option<&str>) -> Result<FlakeProbeResult> {
    let base_ref = to_flake_ref(repo_url);
    // Pin to a specific revision when requested.
    let flake_ref = if let Some(rev) = revision {
        format!("{base_ref}?rev={rev}")
    } else {
        base_ref
    };
    let output = tokio::time::timeout(std::time::Duration::from_secs(60), async {
        tokio::process::Command::new("nix")
            .args([
                // Works even if the host nix.conf doesn't enable flakes.
                "--extra-experimental-features",
                "nix-command flakes",
                "flake",
                "show",
                "--json",
                "--no-write-lock-file",
                &flake_ref,
            ])
            .output()
            .await
    })
    .await
    .map_err(|_| CiError::Timeout("Flake probe timed out after 60s".to_string()))?
    .map_err(|e| CiError::NixEval(format!("Failed to run nix flake show: {e}")))?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        // Common non-flake case: report it as data, not as an error.
        if stderr.contains("does not provide attribute") || stderr.contains("has no 'flake.nix'") {
            return Ok(FlakeProbeResult {
                is_flake: false,
                outputs: Vec::new(),
                suggested_jobsets: Vec::new(),
                metadata: FlakeMetadata::default(),
                error: Some("Repository does not contain a flake.nix".to_string()),
            });
        }
        // Access problems get a friendlier message than raw nix output.
        if stderr.contains("denied")
            || stderr.contains("not accessible")
            || stderr.contains("authentication")
        {
            return Err(CiError::NixEval(
                "Repository not accessible. Check URL and permissions.".to_string(),
            ));
        }
        return Err(CiError::NixEval(format!("nix flake show failed: {stderr}")));
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    if stdout.len() > MAX_OUTPUT_SIZE {
        // The whole output is already buffered in memory at this point.
        // Truncating at a raw byte offset could split a multi-byte UTF-8
        // character (which panics when slicing a &str), and a byte-truncated
        // JSON document can never parse anyway — so parse the full document
        // and only warn about the size.
        tracing::warn!(
            "Flake show output exceeds {}MB",
            MAX_OUTPUT_SIZE / (1024 * 1024)
        );
    }
    let raw: serde_json::Value = serde_json::from_str(&stdout)
        .map_err(|e| CiError::NixEval(format!("Failed to parse flake show output: {e}")))?;
    let top = match raw.as_object() {
        Some(obj) => obj,
        None => {
            return Err(CiError::NixEval(
                "Unexpected flake show output format".to_string(),
            ));
        }
    };
    let mut outputs = Vec::new();
    let mut suggested_jobsets = Vec::new();
    // Known output types: (attribute, kind, human description, priority).
    let output_types: &[(&str, &str, &str, u8)] = &[
        ("hydraJobs", "derivation", "CI Jobs (hydraJobs)", 10),
        ("checks", "derivation", "Checks", 7),
        ("packages", "derivation", "Packages", 6),
        ("devShells", "derivation", "Development Shells", 3),
        (
            "nixosConfigurations",
            "configuration",
            "NixOS Configurations",
            4,
        ),
        ("nixosModules", "module", "NixOS Modules", 2),
        ("overlays", "overlay", "Overlays", 1),
        (
            "legacyPackages",
            "derivation",
            "Legacy Packages (nixpkgs-style)",
            5,
        ),
    ];
    for &(key, output_type, description, priority) in output_types {
        if let Some(val) = top.get(key) {
            let systems = extract_systems(val);
            outputs.push(FlakeOutput {
                path: key.to_string(),
                output_type: output_type.to_string(),
                systems: systems.clone(),
            });
            // Generate a suggested jobset for buildable outputs only.
            let nix_expression = match key {
                "hydraJobs" => "hydraJobs".to_string(),
                "checks" => "checks".to_string(),
                "packages" => "packages".to_string(),
                "devShells" => "devShells".to_string(),
                "legacyPackages" => "legacyPackages".to_string(),
                _ => continue, // Don't suggest jobsets for non-buildable outputs
            };
            suggested_jobsets.push(SuggestedJobset {
                name: key.to_string(),
                nix_expression,
                description: description.to_string(),
                priority,
            });
        }
    }
    // Sort jobsets by priority (highest first)
    suggested_jobsets.sort_by(|a, b| b.priority.cmp(&a.priority));
    // Extract metadata from the flake
    let metadata = FlakeMetadata {
        description: top
            .get("description")
            .and_then(|v| v.as_str())
            .map(|s| s.to_string()),
        url: Some(repo_url.to_string()),
    };
    Ok(FlakeProbeResult {
        is_flake: true,
        outputs,
        suggested_jobsets,
        metadata,
        error: None,
    })
}
/// Extract system names from a flake output value (e.g., `packages.x86_64-linux`).
///
/// Object keys that look like `arch-os` system identifiers (they contain a
/// `-` and either "linux" or "darwin") are collected and returned sorted.
/// Non-object values yield an empty vector.
pub(crate) fn extract_systems(val: &serde_json::Value) -> Vec<String> {
    let mut found: Vec<String> = val
        .as_object()
        .map(|obj| {
            obj.keys()
                // System names follow the `arch-os` pattern, e.g.
                // x86_64-linux or aarch64-darwin.
                .filter(|key| key.contains('-') && (key.contains("linux") || key.contains("darwin")))
                .cloned()
                .collect()
        })
        .unwrap_or_default();
    found.sort();
    found
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;
    // A multi-system output object: every system key is recognized and the
    // result comes back sorted alphabetically.
    #[test]
    fn test_extract_systems_typical_flake() {
        let val = json!({
            "x86_64-linux": { "hello": {} },
            "aarch64-linux": { "hello": {} },
            "x86_64-darwin": { "hello": {} }
        });
        let systems = extract_systems(&val);
        assert_eq!(
            systems,
            vec!["aarch64-linux", "x86_64-darwin", "x86_64-linux"]
        );
    }
    // An empty object produces no systems.
    #[test]
    fn test_extract_systems_empty_object() {
        let val = json!({});
        assert!(extract_systems(&val).is_empty());
    }
    // Keys that don't match the `arch-os` pattern (default, lib, overlay)
    // must be filtered out.
    #[test]
    fn test_extract_systems_non_system_keys_ignored() {
        let val = json!({
            "x86_64-linux": {},
            "default": {},
            "lib": {},
            "overlay": {}
        });
        let systems = extract_systems(&val);
        assert_eq!(systems, vec!["x86_64-linux"]);
    }
    // Non-object JSON values (strings, null) degrade gracefully to an empty
    // list instead of panicking.
    #[test]
    fn test_extract_systems_non_object_value() {
        let val = json!("string");
        assert!(extract_systems(&val).is_empty());
        let val = json!(null);
        assert!(extract_systems(&val).is_empty());
    }
    // Round-trip a fully-populated probe result through serde to ensure the
    // API payload shape is stable.
    #[test]
    fn test_flake_probe_result_serialization() {
        let result = FlakeProbeResult {
            is_flake: true,
            outputs: vec![FlakeOutput {
                path: "packages".to_string(),
                output_type: "derivation".to_string(),
                systems: vec!["x86_64-linux".to_string()],
            }],
            suggested_jobsets: vec![SuggestedJobset {
                name: "packages".to_string(),
                nix_expression: "packages".to_string(),
                description: "Packages".to_string(),
                priority: 6,
            }],
            metadata: FlakeMetadata {
                description: Some("A test flake".to_string()),
                url: Some("https://github.com/test/repo".to_string()),
            },
            error: None,
        };
        let json = serde_json::to_string(&result).unwrap();
        let parsed: FlakeProbeResult = serde_json::from_str(&json).unwrap();
        assert!(parsed.is_flake);
        assert_eq!(parsed.outputs.len(), 1);
        assert_eq!(parsed.suggested_jobsets.len(), 1);
        assert_eq!(parsed.suggested_jobsets[0].priority, 6);
        assert_eq!(parsed.metadata.description.as_deref(), Some("A test flake"));
    }
    // The "not a flake" shape (is_flake=false + error message) also
    // round-trips through serde.
    #[test]
    fn test_flake_probe_result_not_a_flake() {
        let result = FlakeProbeResult {
            is_flake: false,
            outputs: Vec::new(),
            suggested_jobsets: Vec::new(),
            metadata: FlakeMetadata::default(),
            error: Some("Repository does not contain a flake.nix".to_string()),
        };
        let json = serde_json::to_string(&result).unwrap();
        let parsed: FlakeProbeResult = serde_json::from_str(&json).unwrap();
        assert!(!parsed.is_flake);
        assert!(parsed.error.is_some());
    }
    // GitHub HTTPS URLs: scheme, trailing `.git`, and trailing `/` are all
    // normalized into the native `github:` flake-ref form.
    #[test]
    fn test_to_flake_ref_github_https() {
        assert_eq!(
            to_flake_ref("https://github.com/notashelf/rags"),
            "github:notashelf/rags"
        );
        assert_eq!(
            to_flake_ref("https://github.com/NixOS/nixpkgs"),
            "github:NixOS/nixpkgs"
        );
        assert_eq!(
            to_flake_ref("https://github.com/owner/repo.git"),
            "github:owner/repo"
        );
        assert_eq!(
            to_flake_ref("http://github.com/owner/repo"),
            "github:owner/repo"
        );
        assert_eq!(
            to_flake_ref("https://github.com/owner/repo/"),
            "github:owner/repo"
        );
    }
    // GitLab HTTPS URLs convert likewise, including subgroup paths.
    #[test]
    fn test_to_flake_ref_gitlab_https() {
        assert_eq!(
            to_flake_ref("https://gitlab.com/owner/repo"),
            "gitlab:owner/repo"
        );
        assert_eq!(
            to_flake_ref("https://gitlab.com/group/subgroup/repo.git"),
            "gitlab:group/subgroup/repo"
        );
    }
    // Strings that are already flake refs (github:, git+, path:, sourcehut:)
    // must pass through untouched.
    #[test]
    fn test_to_flake_ref_already_flake_ref() {
        assert_eq!(to_flake_ref("github:owner/repo"), "github:owner/repo");
        assert_eq!(to_flake_ref("gitlab:owner/repo"), "gitlab:owner/repo");
        assert_eq!(
            to_flake_ref("git+https://example.com/repo.git"),
            "git+https://example.com/repo.git"
        );
        assert_eq!(
            to_flake_ref("path:/some/local/path"),
            "path:/some/local/path"
        );
        assert_eq!(to_flake_ref("sourcehut:~user/repo"), "sourcehut:~user/repo");
    }
    // HTTPS URLs for unknown forges get the `git+` prefix so nix clones them.
    #[test]
    fn test_to_flake_ref_other_https() {
        assert_eq!(
            to_flake_ref("https://codeberg.org/owner/repo"),
            "git+https://codeberg.org/owner/repo"
        );
        assert_eq!(
            to_flake_ref("https://sr.ht/~user/repo"),
            "git+https://sr.ht/~user/repo"
        );
    }
    // The sort used in probe_flake orders suggested jobsets by descending
    // priority (hydraJobs first).
    #[test]
    fn test_suggested_jobset_ordering() {
        let mut jobsets = vec![
            SuggestedJobset {
                name: "packages".to_string(),
                nix_expression: "packages".to_string(),
                description: "Packages".to_string(),
                priority: 6,
            },
            SuggestedJobset {
                name: "hydraJobs".to_string(),
                nix_expression: "hydraJobs".to_string(),
                description: "CI Jobs".to_string(),
                priority: 10,
            },
            SuggestedJobset {
                name: "checks".to_string(),
                nix_expression: "checks".to_string(),
                description: "Checks".to_string(),
                priority: 7,
            },
        ];
        jobsets.sort_by(|a, b| b.priority.cmp(&a.priority));
        assert_eq!(jobsets[0].name, "hydraJobs");
        assert_eq!(jobsets[1].name, "checks");
        assert_eq!(jobsets[2].name, "packages");
    }
}

View file

@ -0,0 +1,51 @@
//! Tracing initialization helper for all FC daemons.
use tracing_subscriber::EnvFilter;
use tracing_subscriber::fmt;
use crate::config::TracingConfig;
/// Initialize the global tracing subscriber based on configuration.
///
/// Respects `RUST_LOG` environment variable as an override. If `RUST_LOG` is
/// not set, falls back to the configured level. The output format is chosen
/// by `config.format`: `"json"`, `"full"`, or compact for anything else.
pub fn init_tracing(config: &TracingConfig) {
    // RUST_LOG wins; otherwise use the configured level string.
    let filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&config.level));

    // The three fmt builders (`json`, default, `compact`) have distinct
    // types, so a local macro applies the shared options to each of them.
    macro_rules! finish {
        ($builder:expr) => {{
            let configured = $builder
                .with_target(config.show_targets)
                .with_env_filter(filter);
            if config.show_timestamps {
                configured.init();
            } else {
                configured.without_time().init();
            }
        }};
    }

    match config.format.as_str() {
        "json" => finish!(fmt().json()),
        "full" => finish!(fmt()),
        // "compact" or any other value
        _ => finish!(fmt().compact()),
    }
}