meta: add database migrations; set up migration CLI

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I0cbc9798243134d36f788099ecc3ee5a6a6a6964
This commit is contained in:
raf 2025-11-02 21:04:11 +03:00
commit a4c3cd1517
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
9 changed files with 366 additions and 0 deletions

3
Cargo.lock generated
View file

@ -532,11 +532,14 @@ version = "0.1.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"chrono", "chrono",
"clap",
"git2", "git2",
"serde", "serde",
"serde_json", "serde_json",
"sqlx", "sqlx",
"thiserror", "thiserror",
"tracing",
"tracing-subscriber",
"uuid", "uuid",
] ]

View file

@ -15,3 +15,6 @@ chrono.workspace = true
anyhow.workspace = true anyhow.workspace = true
thiserror.workspace = true thiserror.workspace = true
git2.workspace = true git2.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
clap.workspace = true

View file

@ -0,0 +1,151 @@
-- Initial schema for FC
-- Creates all core tables for the CI system
-- Enable UUID extension for UUID generation
-- NOTE(review): PostgreSQL 13+ ships gen_random_uuid() built in; uuid-ossp is
-- only required on older servers — confirm the minimum supported PG version.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Projects: stores repository configurations
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(255) NOT NULL UNIQUE,
description TEXT,
repository_url TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Maintained automatically by the update_projects_updated_at trigger below
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Jobsets: Contains build configurations for each project
CREATE TABLE jobsets (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
-- Deleting a project removes all of its jobsets
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
nix_expression TEXT NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Maintained automatically by the update_jobsets_updated_at trigger below
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Jobset names only need to be unique within their project
UNIQUE(project_id, name)
);
-- Evaluations: Tracks Nix evaluation results for each jobset
CREATE TABLE evaluations (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
-- NOTE(review): 40 chars fits a SHA-1 git hash; SHA-256 repositories use
-- 64-char hashes — confirm whether those need to be supported.
commit_hash VARCHAR(40) NOT NULL,
evaluation_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Lifecycle state enforced with a CHECK constraint rather than an enum type
status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed')),
error_message TEXT,
-- Each commit is evaluated at most once per jobset
UNIQUE(jobset_id, commit_hash)
);
-- Builds: Individual build jobs with their status
CREATE TABLE builds (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
evaluation_id UUID NOT NULL REFERENCES evaluations(id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
-- Nix derivation path produced by the evaluation
drv_path TEXT NOT NULL,
status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed', 'cancelled')),
-- NULL until the build is actually scheduled/finished
started_at TIMESTAMP WITH TIME ZONE,
completed_at TIMESTAMP WITH TIME ZONE,
log_path TEXT,
build_output_path TEXT,
error_message TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- One build row per job within a given evaluation
UNIQUE(evaluation_id, job_name)
);
-- Build products: Stores output artifacts and metadata
CREATE TABLE build_products (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
path TEXT NOT NULL,
-- 64 hex chars for a SHA-256 digest
sha256_hash VARCHAR(64),
file_size BIGINT,
content_type VARCHAR(100),
is_directory BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Build steps: Detailed build execution logs and timing
CREATE TABLE build_steps (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
step_number INTEGER NOT NULL,
command TEXT NOT NULL,
output TEXT,
error_output TEXT,
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE,
exit_code INTEGER,
-- Steps are ordered and unique per build
UNIQUE(build_id, step_number)
);
-- Projects indexes
-- NOTE(review): idx_projects_name duplicates the implicit unique index on
-- projects.name — likely redundant; verify before keeping both.
CREATE INDEX idx_projects_name ON projects(name);
CREATE INDEX idx_projects_created_at ON projects(created_at);
-- Jobsets indexes
CREATE INDEX idx_jobsets_project_id ON jobsets(project_id);
CREATE INDEX idx_jobsets_enabled ON jobsets(enabled);
CREATE INDEX idx_jobsets_name ON jobsets(name);
-- Evaluations indexes
CREATE INDEX idx_evaluations_jobset_id ON evaluations(jobset_id);
CREATE INDEX idx_evaluations_commit_hash ON evaluations(commit_hash);
CREATE INDEX idx_evaluations_status ON evaluations(status);
CREATE INDEX idx_evaluations_evaluation_time ON evaluations(evaluation_time);
-- Builds indexes
CREATE INDEX idx_builds_evaluation_id ON builds(evaluation_id);
CREATE INDEX idx_builds_status ON builds(status);
CREATE INDEX idx_builds_job_name ON builds(job_name);
CREATE INDEX idx_builds_started_at ON builds(started_at);
CREATE INDEX idx_builds_completed_at ON builds(completed_at);
-- Build products indexes
CREATE INDEX idx_build_products_build_id ON build_products(build_id);
CREATE INDEX idx_build_products_name ON build_products(name);
-- Build steps indexes
CREATE INDEX idx_build_steps_build_id ON build_steps(build_id);
CREATE INDEX idx_build_steps_started_at ON build_steps(started_at);
-- Create trigger functions for updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
-- NOTE(review): quoting the language name ('plpgsql') is deprecated style;
-- modern form is: LANGUAGE plpgsql (still accepted, so left unchanged).
$$ language 'plpgsql';
-- Create triggers for automatic updated_at updates
CREATE TRIGGER update_projects_updated_at
BEFORE UPDATE ON projects
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_jobsets_updated_at
BEFORE UPDATE ON jobsets
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
-- Create view for active jobsets (jobsets that are enabled and belong to active projects)
CREATE VIEW active_jobsets AS
SELECT
j.*,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.enabled = true;
-- Create view for build statistics
-- NOTE(review): the WHERE clause excludes rows with NULL started_at, yet
-- pending builds typically have no started_at — pending_builds will likely
-- always be 0 here. Confirm intent; the filter may belong only on avg duration.
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_builds,
COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_builds,
COUNT(CASE WHEN status = 'running' THEN 1 END) as running_builds,
COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_builds,
AVG(EXTRACT(EPOCH FROM (completed_at - started_at))) as avg_duration_seconds
FROM builds
WHERE started_at IS NOT NULL;

View file

@ -0,0 +1,26 @@
# Database Migrations
This directory contains SQL migrations for the FC database.
## Migration Files
- `001_initial_schema.sql`: Creates the core database schema including projects,
jobsets, evaluations, builds, and related tables.
## Running Migrations
The easiest way to run migrations is with the bundled `fc-migrate` CLI.
Distribution packagers should vendor this crate where possible.
```bash
# Run all pending migrations
fc-migrate up postgresql://user:password@localhost/fc_ci
# Validate current schema
fc-migrate validate postgresql://user:password@localhost/fc_ci
# Create a new migration
fc-migrate create migration_name
```
TODO: add or generate schema overviews

View file

@ -1,7 +1,10 @@
//! Common types and utilities for CI //! Common types and utilities for CI
pub mod error; pub mod error;
pub mod migrate;
pub mod migrate_cli;
pub mod models; pub mod models;
pub use error::*; pub use error::*;
pub use migrate::*;
pub use models::*; pub use models::*;

View file

@ -0,0 +1,69 @@
//! Database migration utilities
use sqlx::{PgPool, Postgres, migrate::MigrateDatabase};
use tracing::{error, info, warn};
/// Runs database migrations and ensures the database exists
/// Ensures the target database exists, then applies all pending migrations.
///
/// The database named in `database_url` is created when missing, a verified
/// connection pool is opened, and every migration embedded from the
/// `./migrations` directory is run in order.
///
/// # Errors
///
/// Returns an error if the existence check, database creation, connection,
/// or any migration fails.
pub async fn run_migrations(database_url: &str) -> anyhow::Result<()> {
    info!("Starting database migrations");

    // First run: provision the database itself before touching it.
    if !Postgres::database_exists(database_url).await? {
        warn!("Database does not exist, creating it");
        Postgres::create_database(database_url).await?;
        info!("Database created successfully");
    }

    // Open a smoke-tested pool, then apply the embedded migration set.
    let pool = create_connection_pool(database_url).await?;
    if let Err(e) = sqlx::migrate!("./migrations").run(&pool).await {
        error!("Failed to run database migrations: {}", e);
        return Err(anyhow::anyhow!("Migration failed: {e}"));
    }

    info!("Database migrations completed successfully");
    Ok(())
}
/// Creates a connection pool with proper configuration
/// Opens a [`PgPool`] for `database_url` and smoke-tests it.
///
/// A trivial `SELECT 1` round-trip confirms the server is actually reachable
/// before the pool is handed back to the caller.
async fn create_connection_pool(database_url: &str) -> anyhow::Result<PgPool> {
    let pool = PgPool::connect(database_url).await?;
    let _ = sqlx::query("SELECT 1").fetch_one(&pool).await?;
    Ok(pool)
}
/// Validates that all required tables exist and have the expected structure
/// Validates that all required core tables exist.
///
/// Checks `information_schema.tables` for every table created by the initial
/// migration. The lookup is restricted to the current schema so that an
/// identically named table in an unrelated schema cannot produce a false
/// positive (the original query matched on `table_name` alone).
///
/// Note: this verifies existence only, not column structure.
///
/// # Errors
///
/// Returns an error if the query fails or any required table is missing.
pub async fn validate_schema(pool: &PgPool) -> anyhow::Result<()> {
    info!("Validating database schema");

    const REQUIRED_TABLES: [&str; 6] = [
        "projects",
        "jobsets",
        "evaluations",
        "builds",
        "build_products",
        "build_steps",
    ];

    for table in REQUIRED_TABLES {
        let count = sqlx::query_scalar::<_, i64>(
            "SELECT COUNT(*) FROM information_schema.tables \
             WHERE table_name = $1 AND table_schema = current_schema()",
        )
        .bind(table)
        .fetch_one(pool)
        .await?;

        if count == 0 {
            return Err(anyhow::anyhow!("Required table '{table}' does not exist"));
        }
    }

    info!("Database schema validation passed");
    Ok(())
}

View file

@ -0,0 +1,85 @@
//! CLI utility for database migrations
use clap::{Parser, Subcommand};
use tracing::info;
use tracing_subscriber::fmt::init;
// Top-level argument parser for the `fc-migrate` binary.
// NOTE: clap derives user-visible help text from `///` doc comments, so only
// plain `//` comments are used here to keep the generated CLI output unchanged.
#[derive(Parser)]
#[command(name = "fc-migrate")]
#[command(about = "Database migration utility for FC CI")]
pub struct Cli {
    // The selected subcommand; clap rejects an invocation without one.
    #[command(subcommand)]
    pub command: Commands,
}
// Subcommands of `fc-migrate`. The `///` comments below double as clap help
// strings shown to users — treat them as part of the CLI's behavior.
#[derive(Subcommand)]
pub enum Commands {
    /// Run all pending migrations
    Up {
        /// Database connection URL
        database_url: String,
    },
    /// Validate the current schema
    Validate {
        /// Database connection URL
        database_url: String,
    },
    /// Create a new migration file
    Create {
        /// Migration name
        // NOTE(review): `required = true` is the default for a positional
        // String argument, so this attribute is redundant but harmless.
        #[arg(required = true)]
        name: String,
    },
}
/// Entry point for the `fc-migrate` binary.
///
/// Parses command-line arguments, initializes the tracing subscriber, and
/// dispatches to the requested subcommand.
///
/// # Errors
///
/// Propagates any failure from migration, validation, or file creation.
pub async fn run() -> anyhow::Result<()> {
    let cli = Cli::parse();

    // Set up log output before doing any real work.
    init();

    match cli.command {
        Commands::Up { database_url } => {
            info!("Running database migrations");
            crate::run_migrations(&database_url).await?;
            info!("Migrations completed successfully");
        }
        Commands::Validate { database_url } => {
            info!("Validating database schema");
            let pool = sqlx::PgPool::connect(&database_url).await?;
            crate::validate_schema(&pool).await?;
            info!("Schema validation passed");
        }
        Commands::Create { name } => create_migration(&name)?,
    }

    Ok(())
}
/// Creates a new timestamped migration file under `crates/common/migrations/`.
///
/// The file is named `<UTC timestamp>_<name>.sql` and seeded with a comment
/// template for the author to fill in.
///
/// # Errors
///
/// Returns an error if the file cannot be written (e.g. the migrations
/// directory does not exist or is not writable).
fn create_migration(name: &str) -> anyhow::Result<()> {
    use chrono::Utc;
    use std::fs;

    let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
    let filename = format!("{timestamp}_{name}.sql");
    // Bug fix: the path previously contained a literal placeholder instead of
    // interpolating `filename`, so every invocation targeted the same bogus
    // path and the computed name was never used.
    let filepath = format!("crates/common/migrations/{filename}");

    let content = format!(
        "-- Migration: {}\n\
         -- Created: {}\n\
         \n\
         -- Add your migration SQL here\n\
         \n\
         -- Uncomment below for rollback SQL\n\
         -- ROLLBACK;\n",
        name,
        Utc::now().to_rfc3339()
    );

    fs::write(&filepath, content)?;
    println!("Created migration file: {filepath}");
    Ok(())
}

View file

@ -0,0 +1,18 @@
# Standalone binary crate; the actual migration logic lives in fc-common.
[package]
name = "fc-migrate-cli"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
# The installed binary is named `fc-migrate`, not after the crate.
[[bin]]
name = "fc-migrate"
path = "src/main.rs"
[dependencies]
# Shared crate providing migrate / migrate_cli modules.
fc-common = { path = "../common" }
clap.workspace = true
anyhow.workspace = true
tracing-subscriber.workspace = true
tokio.workspace = true

View file

@ -0,0 +1,8 @@
//! Database migration CLI utility
use fc_common::migrate_cli::run;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Thin wrapper: all CLI behavior lives in `fc_common::migrate_cli::run`.
    run().await
}