fc-common: consolidate database migrations; simplify

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ia808d76241cec6e8760d87443bb0dc976a6a6964
This commit is contained in:
raf 2026-02-18 18:29:14 +03:00
commit e7425e0abf
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
22 changed files with 656 additions and 671 deletions

View file

@ -0,0 +1,614 @@
-- FC database schema.
-- Full schema definition for the FC CI system.
-- This file consolidates the previous incremental migrations into a
-- single bootstrap schema.
-- Provides uuid_generate_v4(), used as the PK default throughout.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- projects: stores repository configurations
-- Root entity: jobsets, webhook/notification configs, channels and
-- memberships all reference projects via project_id.
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
name VARCHAR(255) NOT NULL UNIQUE,
description TEXT,
repository_url TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Maintained automatically by the update_projects_updated_at trigger.
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- users: accounts for authentication and personalization
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
username VARCHAR(255) NOT NULL UNIQUE,
email VARCHAR(255) NOT NULL UNIQUE,
full_name VARCHAR(255),
-- NULL for OAuth-only accounts (see user_type).
password_hash VARCHAR(255),
-- Authentication source: 'local' or an external provider name.
user_type VARCHAR(50) NOT NULL DEFAULT 'local',
-- Global role; per-project roles live in project_members.
role VARCHAR(50) NOT NULL DEFAULT 'read-only',
enabled BOOLEAN NOT NULL DEFAULT true,
email_verified BOOLEAN NOT NULL DEFAULT false,
public_dashboard BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Maintained automatically by the update_users_updated_at trigger.
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_login_at TIMESTAMP WITH TIME ZONE
);
-- remote_builders: multi-machine / multi-arch build agents
CREATE TABLE remote_builders (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
name VARCHAR(255) NOT NULL UNIQUE,
ssh_uri TEXT NOT NULL,
-- Nix system types this builder can build (e.g. per-arch strings).
systems TEXT[] NOT NULL DEFAULT '{}',
max_jobs INTEGER NOT NULL DEFAULT 1,
-- Relative weight for scheduling; higher means preferred. -- TODO confirm semantics
speed_factor INTEGER NOT NULL DEFAULT 1,
supported_features TEXT[] NOT NULL DEFAULT '{}',
mandatory_features TEXT[] NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
public_host_key TEXT,
ssh_key_file TEXT,
-- Failure-tracking fields; presumably used to back off from flaky
-- builders until disabled_until passes -- confirm against daemon code.
consecutive_failures INTEGER NOT NULL DEFAULT 0,
disabled_until TIMESTAMP WITH TIME ZONE,
last_failure TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- jobsets: build configurations for each project
CREATE TABLE jobsets (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
-- Flake URI (e.g. "github:owner/repo" or ".") when flake_mode is true,
-- otherwise a legacy Nix expression path such as "release.nix".
nix_expression TEXT NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT true,
flake_mode BOOLEAN NOT NULL DEFAULT true,
-- Polling interval; unit not stated here (seconds or minutes) -- TODO confirm.
check_interval INTEGER NOT NULL DEFAULT 60,
branch VARCHAR(255),
-- Fair-share scheduling weight across jobsets.
scheduling_shares INTEGER NOT NULL DEFAULT 100,
state VARCHAR(50) NOT NULL DEFAULT 'enabled' CHECK (
state IN (
'disabled',
'enabled',
'one_shot',
'one_at_a_time'
)
),
last_checked_at TIMESTAMP WITH TIME ZONE,
-- Number of evaluations to retain; presumably consulted by GC -- confirm.
keep_nr INTEGER NOT NULL DEFAULT 3,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Maintained automatically by the update_jobsets_updated_at trigger.
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (project_id, name)
);
-- api_keys: authentication tokens with role-based access control
-- Only a hash of the key is stored; the plaintext is shown once at creation.
-- NOTE(review): hashing scheme not visible here -- confirm in application code.
CREATE TABLE api_keys (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
name VARCHAR(255) NOT NULL,
key_hash VARCHAR(128) NOT NULL UNIQUE,
role VARCHAR(50) NOT NULL DEFAULT 'read-only' CHECK (
role IN (
'admin',
'create-projects',
'restart-jobs',
'cancel-build',
'bump-to-front',
'eval-jobset',
'read-only'
)
),
-- Optional owner link for audit; key survives user deletion (SET NULL).
user_id UUID REFERENCES users (id) ON DELETE SET NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- evaluations: Nix evaluation results for each jobset commit
CREATE TABLE evaluations (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
jobset_id UUID NOT NULL REFERENCES jobsets (id) ON DELETE CASCADE,
-- NOTE(review): 40 chars fits a SHA-1 git hash only; SHA-256 repos need
-- 64 -- confirm whether that matters for this deployment.
commit_hash VARCHAR(40) NOT NULL,
evaluation_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
status TEXT NOT NULL CHECK (
status IN ('pending', 'running', 'completed', 'failed')
),
error_message TEXT,
-- Hash of resolved inputs; allows skipping re-evaluation when unchanged.
inputs_hash VARCHAR(128),
-- Pull-request metadata, populated only for PR-triggered evaluations.
pr_number INTEGER,
pr_head_branch TEXT,
pr_base_branch TEXT,
pr_action TEXT,
UNIQUE (jobset_id, commit_hash)
);
-- builds: individual build jobs
CREATE TABLE builds (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
evaluation_id UUID NOT NULL REFERENCES evaluations (id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
-- Nix derivation path; also used for deduplication (see
-- idx_builds_drv_path / idx_builds_drv_completed).
drv_path TEXT NOT NULL,
status TEXT NOT NULL CHECK (
status IN (
'pending',
'running',
'succeeded',
'failed',
'dependency_failed',
'aborted',
'cancelled',
'failed_with_output',
'timeout',
'cached_failure',
'unsupported_system',
'log_limit_exceeded',
'nar_size_limit_exceeded',
'non_deterministic'
)
),
started_at TIMESTAMP WITH TIME ZONE,
completed_at TIMESTAMP WITH TIME ZONE,
log_path TEXT,
build_output_path TEXT,
error_message TEXT,
-- Scheduling: higher priority first, then FIFO (see idx_builds_priority).
priority INTEGER NOT NULL DEFAULT 0,
retry_count INTEGER NOT NULL DEFAULT 0,
max_retries INTEGER NOT NULL DEFAULT 3,
-- Set while a status notification is awaiting delivery.
notification_pending_since TIMESTAMP WITH TIME ZONE,
log_url TEXT,
-- Map of output name -> store path. -- TODO confirm exact JSON shape
outputs JSONB,
-- Aggregate builds group constituent builds (JSONB list of references).
is_aggregate BOOLEAN NOT NULL DEFAULT false,
constituents JSONB,
-- Which remote builder executed this build, if any.
builder_id UUID REFERENCES remote_builders (id),
signed BOOLEAN NOT NULL DEFAULT false,
system VARCHAR(50),
-- Presumably pins the build against cleanup/GC -- confirm in GC code.
keep BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (evaluation_id, job_name)
);
-- build_products: output artifacts and metadata
CREATE TABLE build_products (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
path TEXT NOT NULL,
sha256_hash VARCHAR(64),
file_size BIGINT,
content_type VARCHAR(100),
is_directory BOOLEAN NOT NULL DEFAULT false,
-- Filesystem path of the Nix GC root protecting this product, if any.
gc_root_path TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- build_steps: detailed build execution logs and timing
-- step_number orders steps within a build; exit_code is NULL until the
-- step completes.
CREATE TABLE build_steps (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
step_number INTEGER NOT NULL,
command TEXT NOT NULL,
output TEXT,
error_output TEXT,
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE,
exit_code INTEGER,
UNIQUE (build_id, step_number)
);
-- build_dependencies: tracks inter-build dependency relationships
-- Edge table: build_id depends on dependency_build_id. Both sides cascade,
-- so deleting either build removes the edge.
CREATE TABLE build_dependencies (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
dependency_build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
UNIQUE (build_id, dependency_build_id)
);
-- webhook_configs: incoming push event configuration per project
-- At most one config per (project, forge) pair.
CREATE TABLE webhook_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
forge_type VARCHAR(50) NOT NULL CHECK (
forge_type IN ('github', 'gitea', 'forgejo', 'gitlab')
),
-- Hash of the shared webhook secret; NULL means no secret verification.
secret_hash VARCHAR(128),
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (project_id, forge_type)
);
-- notification_configs: outgoing notification configuration per project
-- One config per (project, type); type-specific settings live in `config`.
CREATE TABLE notification_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
notification_type VARCHAR(50) NOT NULL CHECK (
notification_type IN (
'github_status',
'gitea_status',
'forgejo_status',
'gitlab_status',
'webhook',
'email'
)
),
config JSONB NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (project_id, notification_type)
);
-- jobset_inputs: parameterized inputs for jobsets
-- `value` is interpreted per input_type (URL for 'git', literal for
-- 'string'/'boolean', etc.); `revision` pins git inputs when set.
CREATE TABLE jobset_inputs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
jobset_id UUID NOT NULL REFERENCES jobsets (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
input_type VARCHAR(50) NOT NULL CHECK (
input_type IN ('git', 'string', 'boolean', 'path', 'build')
),
value TEXT NOT NULL,
revision TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (jobset_id, name)
);
-- channels: release management, tracks the latest good evaluation per jobset
-- current_evaluation_id has no ON DELETE action, so deleting a referenced
-- evaluation is rejected by the FK -- presumably intentional; confirm.
CREATE TABLE channels (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
jobset_id UUID NOT NULL REFERENCES jobsets (id) ON DELETE CASCADE,
current_evaluation_id UUID REFERENCES evaluations (id),
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (project_id, name)
);
-- starred_jobs: personalized dashboard bookmarks per user
-- NOTE(review): jobset_id is nullable, and PostgreSQL treats NULLs as
-- distinct in UNIQUE constraints, so a user can star the same
-- (project, job_name) twice with jobset_id NULL. If duplicates must be
-- rejected, consider UNIQUE NULLS NOT DISTINCT (PG15+) or a partial index.
CREATE TABLE starred_jobs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE,
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
jobset_id UUID REFERENCES jobsets (id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (user_id, project_id, jobset_id, job_name)
);
-- user_sessions: persistent authentication tokens
-- Only the token hash is stored.
-- NOTE(review): session_token_hash is indexed but not UNIQUE; if two
-- sessions could ever hash identically, lookups become ambiguous --
-- confirm whether a UNIQUE constraint is intended.
CREATE TABLE user_sessions (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE,
session_token_hash VARCHAR(255) NOT NULL,
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- project_members: per-project permission assignments
-- Role values are free-form here ('member' default) unlike the CHECKed
-- roles on users/api_keys -- presumably validated in application code; confirm.
CREATE TABLE project_members (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE,
role VARCHAR(50) NOT NULL DEFAULT 'member',
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (project_id, user_id)
);
-- build_metrics: timing, size, and performance metrics per build
-- One row per (build, metric). Metric names referenced elsewhere in this
-- schema include 'output_size_bytes', 'peak_memory_bytes' and
-- 'nar_size_bytes' (see the build_metrics_summary view).
CREATE TABLE build_metrics (
-- Use uuid_generate_v4() like every other table in this consolidated
-- schema (the uuid-ossp extension is loaded at the top of the file);
-- the stray gen_random_uuid() here was an inconsistency carried over
-- from the old incremental migration.
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
metric_name VARCHAR(100) NOT NULL,
metric_value DOUBLE PRECISION NOT NULL,
unit VARCHAR(50) NOT NULL,
collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (build_id, metric_name)
);
-- failed_paths_cache: prevents rebuilding known-failing derivations
-- Keyed by derivation path; consulted before scheduling (supports the
-- 'cached_failure' build status).
CREATE TABLE failed_paths_cache (
drv_path TEXT PRIMARY KEY,
-- Deliberately not a foreign key, so cache entries presumably outlive
-- the build that produced them -- confirm this is intentional.
source_build_id UUID,
failure_status TEXT,
failed_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Indexes: projects
-- NOTE(review): idx_projects_name duplicates the implicit index created
-- by the UNIQUE constraint on projects.name; likewise idx_users_username,
-- idx_users_email and idx_api_keys_key_hash below. Harmless but they add
-- write overhead -- consider dropping in a follow-up.
CREATE INDEX idx_projects_name ON projects (name);
CREATE INDEX idx_projects_created_at ON projects (created_at);
-- Indexes: users
CREATE INDEX idx_users_username ON users (username);
CREATE INDEX idx_users_email ON users (email);
CREATE INDEX idx_users_role ON users (role);
CREATE INDEX idx_users_enabled ON users (enabled);
-- Indexes: remote_builders
-- Partial index: covers only rows with enabled = true.
CREATE INDEX idx_remote_builders_enabled ON remote_builders (enabled)
WHERE
enabled = true;
-- Indexes: jobsets
CREATE INDEX idx_jobsets_project_id ON jobsets (project_id);
CREATE INDEX idx_jobsets_enabled ON jobsets (enabled);
CREATE INDEX idx_jobsets_name ON jobsets (name);
CREATE INDEX idx_jobsets_state ON jobsets (state);
CREATE INDEX idx_jobsets_last_checked_at ON jobsets (last_checked_at);
-- Indexes: api_keys
CREATE INDEX idx_api_keys_key_hash ON api_keys (key_hash);
CREATE INDEX idx_api_keys_user_id ON api_keys (user_id);
-- Indexes: evaluations
CREATE INDEX idx_evaluations_jobset_id ON evaluations (jobset_id);
CREATE INDEX idx_evaluations_commit_hash ON evaluations (commit_hash);
CREATE INDEX idx_evaluations_status ON evaluations (status);
CREATE INDEX idx_evaluations_evaluation_time ON evaluations (evaluation_time);
-- Supports the inputs_hash evaluation-skip lookup per jobset.
CREATE INDEX idx_evaluations_inputs_hash ON evaluations (jobset_id, inputs_hash);
-- PR-triggered evaluations only.
CREATE INDEX idx_evaluations_pr ON evaluations (jobset_id, pr_number)
WHERE
pr_number IS NOT NULL;
-- Indexes: builds
CREATE INDEX idx_builds_evaluation_id ON builds (evaluation_id);
CREATE INDEX idx_builds_status ON builds (status);
CREATE INDEX idx_builds_job_name ON builds (job_name);
CREATE INDEX idx_builds_started_at ON builds (started_at);
CREATE INDEX idx_builds_completed_at ON builds (completed_at);
-- Scheduling order: highest priority first, then oldest first.
CREATE INDEX idx_builds_priority ON builds (priority DESC, created_at ASC);
CREATE INDEX idx_builds_notification_pending ON builds (notification_pending_since)
WHERE
notification_pending_since IS NOT NULL;
CREATE INDEX idx_builds_drv_path ON builds (drv_path);
CREATE INDEX idx_builds_builder ON builds (builder_id)
WHERE
builder_id IS NOT NULL;
CREATE INDEX idx_builds_system ON builds (system)
WHERE
system IS NOT NULL;
-- Composite partial index for the pending-builds scheduler query.
CREATE INDEX idx_builds_pending_priority ON builds (status, priority DESC, created_at ASC)
WHERE
status = 'pending';
-- Deduplication lookup: successfully built derivation paths.
CREATE INDEX idx_builds_drv_completed ON builds (drv_path)
WHERE
status = 'succeeded';
-- Indexes: build_products
CREATE INDEX idx_build_products_build_id ON build_products (build_id);
CREATE INDEX idx_build_products_name ON build_products (name);
-- text_pattern_ops enables prefix (LIKE 'x%') matching on path.
CREATE INDEX idx_build_products_path_prefix ON build_products (path text_pattern_ops);
-- Indexes: build_steps
CREATE INDEX idx_build_steps_build_id ON build_steps (build_id);
CREATE INDEX idx_build_steps_started_at ON build_steps (started_at);
-- Indexes: build_dependencies
CREATE INDEX idx_build_deps_build ON build_dependencies (build_id);
CREATE INDEX idx_build_deps_dep ON build_dependencies (dependency_build_id);
-- Indexes: webhook/notification/jobset_inputs/channels
CREATE INDEX idx_webhook_configs_project ON webhook_configs (project_id);
CREATE INDEX idx_notification_configs_project ON notification_configs (project_id);
CREATE INDEX idx_jobset_inputs_jobset ON jobset_inputs (jobset_id);
CREATE INDEX idx_channels_project ON channels (project_id);
CREATE INDEX idx_channels_jobset ON channels (jobset_id);
-- Indexes: users/sessions/members
CREATE INDEX idx_starred_jobs_user_id ON starred_jobs (user_id);
CREATE INDEX idx_starred_jobs_project_id ON starred_jobs (project_id);
CREATE INDEX idx_user_sessions_token ON user_sessions (session_token_hash);
CREATE INDEX idx_user_sessions_user_id ON user_sessions (user_id);
CREATE INDEX idx_user_sessions_expires ON user_sessions (expires_at);
CREATE INDEX idx_project_members_project_id ON project_members (project_id);
CREATE INDEX idx_project_members_user_id ON project_members (user_id);
-- Indexes: build_metrics / failed_paths_cache
CREATE INDEX idx_build_metrics_build_id ON build_metrics (build_id);
CREATE INDEX idx_build_metrics_collected_at ON build_metrics (collected_at);
CREATE INDEX idx_build_metrics_name ON build_metrics (metric_name);
CREATE INDEX idx_failed_paths_cache_failed_at ON failed_paths_cache (failed_at);
-- Trigger function: auto-update updated_at on mutation
-- Attached BEFORE UPDATE so the new row is rewritten in place; callers
-- never need to set updated_at themselves.
CREATE OR REPLACE FUNCTION update_updated_at_column () RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- One trigger per table that carries an updated_at column.
CREATE TRIGGER update_projects_updated_at BEFORE
UPDATE ON projects FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column ();
CREATE TRIGGER update_jobsets_updated_at BEFORE
UPDATE ON jobsets FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column ();
CREATE TRIGGER update_users_updated_at BEFORE
UPDATE ON users FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column ();
-- Trigger functions: LISTEN/NOTIFY for event-driven daemon wakeup
-- Payload is a small JSON document {op, table}; listeners re-query rather
-- than relying on the payload for row data.
CREATE OR REPLACE FUNCTION notify_builds_changed () RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_builds_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION notify_jobsets_changed () RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_jobsets_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Builds: notify on insert, and on update only when status changed.
CREATE TRIGGER trg_builds_insert_notify
AFTER INSERT ON builds FOR EACH ROW
EXECUTE FUNCTION notify_builds_changed ();
CREATE TRIGGER trg_builds_status_notify
AFTER
UPDATE ON builds FOR EACH ROW WHEN (OLD.status IS DISTINCT FROM NEW.status)
EXECUTE FUNCTION notify_builds_changed ();
-- Jobsets: notify on insert/delete, and on updates to the fields the
-- scheduler cares about (enabled/state/expression/interval).
CREATE TRIGGER trg_jobsets_insert_notify
AFTER INSERT ON jobsets FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed ();
CREATE TRIGGER trg_jobsets_update_notify
AFTER
UPDATE ON jobsets FOR EACH ROW WHEN (
OLD.enabled IS DISTINCT FROM NEW.enabled
OR OLD.state IS DISTINCT FROM NEW.state
OR OLD.nix_expression IS DISTINCT FROM NEW.nix_expression
OR OLD.check_interval IS DISTINCT FROM NEW.check_interval
)
EXECUTE FUNCTION notify_jobsets_changed ();
CREATE TRIGGER trg_jobsets_delete_notify
AFTER DELETE ON jobsets FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed ();
-- Views
-- active_jobsets: jobsets in any runnable state, joined with project info.
-- Columns are listed explicitly (not j.*) so that adding columns to
-- jobsets later cannot silently change the view's shape.
CREATE VIEW active_jobsets AS
SELECT
j.id,
j.project_id,
j.name,
j.nix_expression,
j.enabled,
j.flake_mode,
j.check_interval,
j.branch,
j.scheduling_shares,
j.created_at,
j.updated_at,
j.state,
j.last_checked_at,
j.keep_nr,
p.name as project_name,
p.repository_url
FROM
jobsets j
JOIN projects p ON j.project_id = p.id
WHERE
j.state IN ('enabled', 'one_shot', 'one_at_a_time');
-- build_stats: global build counters plus mean wall-clock duration,
-- restricted to builds that have actually started. The FILTER clause
-- form is equivalent to COUNT(CASE WHEN ... THEN 1 END): COUNT counts
-- non-NULL values, so both spellings yield identical results.
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
COUNT(*) FILTER (
WHERE
status = 'succeeded'
) as completed_builds,
COUNT(*) FILTER (
WHERE
status = 'failed'
) as failed_builds,
COUNT(*) FILTER (
WHERE
status = 'running'
) as running_builds,
COUNT(*) FILTER (
WHERE
status = 'pending'
) as pending_builds,
-- Mean duration in seconds over builds with a start timestamp;
-- NULL completed_at rows contribute NULL and are ignored by AVG.
AVG(
EXTRACT(
EPOCH
FROM
(completed_at - started_at)
)
)::double precision as avg_duration_seconds
FROM
builds
WHERE
started_at IS NOT NULL;
-- build_metrics_summary: one row per build with its key metrics pivoted
-- into columns. LEFT JOIN keeps builds that have no metrics (metric
-- columns come back NULL); MAX over the per-name CASE is the standard
-- pivot trick and is exact because (build_id, metric_name) is unique.
CREATE VIEW build_metrics_summary AS
SELECT
b.id as build_id,
b.job_name,
b.status,
b.system,
e.jobset_id,
j.project_id,
b.started_at,
b.completed_at,
EXTRACT(
EPOCH
FROM
(b.completed_at - b.started_at)
) as duration_seconds,
MAX(
CASE
WHEN bm.metric_name = 'output_size_bytes' THEN bm.metric_value
END
) as output_size_bytes,
MAX(
CASE
WHEN bm.metric_name = 'peak_memory_bytes' THEN bm.metric_value
END
) as peak_memory_bytes,
MAX(
CASE
WHEN bm.metric_name = 'nar_size_bytes' THEN bm.metric_value
END
) as nar_size_bytes
FROM
builds b
JOIN evaluations e ON b.evaluation_id = e.id
JOIN jobsets j ON e.jobset_id = j.id
LEFT JOIN build_metrics bm ON b.id = bm.build_id
GROUP BY
b.id,
b.job_name,
b.status,
b.system,
e.jobset_id,
j.project_id,
b.started_at,
b.completed_at;

View file

@ -0,0 +1,5 @@
-- Example migration stub.
-- Replace this with real schema changes when needed.
-- Run: cargo run --bin fc-migrate -- create <name>
SELECT
1;

View file

@ -1,151 +0,0 @@
-- Initial schema for FC
-- Creates all core tables for the CI system
-- Enable UUID extension for UUID generation
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Projects: stores repository configurations
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(255) NOT NULL UNIQUE,
description TEXT,
repository_url TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Jobsets: Contains build configurations for each project
CREATE TABLE jobsets (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
nix_expression TEXT NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, name)
);
-- Evaluations: Tracks Nix evaluation results for each jobset
CREATE TABLE evaluations (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
commit_hash VARCHAR(40) NOT NULL,
evaluation_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed')),
error_message TEXT,
UNIQUE(jobset_id, commit_hash)
);
-- Builds: Individual build jobs with their status
CREATE TABLE builds (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
evaluation_id UUID NOT NULL REFERENCES evaluations(id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
drv_path TEXT NOT NULL,
status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed', 'cancelled')),
started_at TIMESTAMP WITH TIME ZONE,
completed_at TIMESTAMP WITH TIME ZONE,
log_path TEXT,
build_output_path TEXT,
error_message TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(evaluation_id, job_name)
);
-- Build products: Stores output artifacts and metadata
CREATE TABLE build_products (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
path TEXT NOT NULL,
sha256_hash VARCHAR(64),
file_size BIGINT,
content_type VARCHAR(100),
is_directory BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Build steps: Detailed build execution logs and timing
CREATE TABLE build_steps (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
step_number INTEGER NOT NULL,
command TEXT NOT NULL,
output TEXT,
error_output TEXT,
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE,
exit_code INTEGER,
UNIQUE(build_id, step_number)
);
-- Projects indexes
CREATE INDEX idx_projects_name ON projects(name);
CREATE INDEX idx_projects_created_at ON projects(created_at);
-- Jobsets indexes
CREATE INDEX idx_jobsets_project_id ON jobsets(project_id);
CREATE INDEX idx_jobsets_enabled ON jobsets(enabled);
CREATE INDEX idx_jobsets_name ON jobsets(name);
-- Evaluations indexes
CREATE INDEX idx_evaluations_jobset_id ON evaluations(jobset_id);
CREATE INDEX idx_evaluations_commit_hash ON evaluations(commit_hash);
CREATE INDEX idx_evaluations_status ON evaluations(status);
CREATE INDEX idx_evaluations_evaluation_time ON evaluations(evaluation_time);
-- Builds indexes
CREATE INDEX idx_builds_evaluation_id ON builds(evaluation_id);
CREATE INDEX idx_builds_status ON builds(status);
CREATE INDEX idx_builds_job_name ON builds(job_name);
CREATE INDEX idx_builds_started_at ON builds(started_at);
CREATE INDEX idx_builds_completed_at ON builds(completed_at);
-- Build products indexes
CREATE INDEX idx_build_products_build_id ON build_products(build_id);
CREATE INDEX idx_build_products_name ON build_products(name);
-- Build steps indexes
CREATE INDEX idx_build_steps_build_id ON build_steps(build_id);
CREATE INDEX idx_build_steps_started_at ON build_steps(started_at);
-- Create trigger functions for updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ language 'plpgsql';
-- Create triggers for automatic updated_at updates
CREATE TRIGGER update_projects_updated_at
BEFORE UPDATE ON projects
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_jobsets_updated_at
BEFORE UPDATE ON jobsets
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
-- Create view for active jobsets (jobsets that are enabled and belong to active projects)
CREATE VIEW active_jobsets AS
SELECT
j.*,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.enabled = true;
-- Create view for build statistics
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_builds,
COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_builds,
COUNT(CASE WHEN status = 'running' THEN 1 END) as running_builds,
COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_builds,
AVG(EXTRACT(EPOCH FROM (completed_at - started_at))) as avg_duration_seconds
FROM builds
WHERE started_at IS NOT NULL;

View file

@ -1,2 +0,0 @@
-- Add system field to builds table
ALTER TABLE builds ADD COLUMN system VARCHAR(50);

View file

@ -1,92 +0,0 @@
-- Production features: auth, priority, retry, notifications, GC roots, log paths
-- API key authentication
CREATE TABLE api_keys (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(255) NOT NULL,
key_hash VARCHAR(128) NOT NULL UNIQUE,
role VARCHAR(50) NOT NULL DEFAULT 'admin'
CHECK (role IN ('admin', 'create-projects', 'restart-jobs', 'cancel-build', 'bump-to-front', 'eval-jobset', 'read-only')),
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- Build priority and retry support
ALTER TABLE builds ADD COLUMN priority INTEGER NOT NULL DEFAULT 0;
ALTER TABLE builds ADD COLUMN retry_count INTEGER NOT NULL DEFAULT 0;
ALTER TABLE builds ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 3;
ALTER TABLE builds ADD COLUMN notification_pending_since TIMESTAMP WITH TIME ZONE;
-- GC root tracking on build products
ALTER TABLE build_products ADD COLUMN gc_root_path TEXT;
-- Build log file path (filesystem path to captured log)
ALTER TABLE builds ADD COLUMN log_url TEXT;
-- Webhook configuration for incoming push events
CREATE TABLE webhook_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
forge_type VARCHAR(50) NOT NULL CHECK (forge_type IN ('github', 'gitea', 'forgejo', 'gitlab')),
secret_hash VARCHAR(128),
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, forge_type)
);
-- Notification configuration per project
CREATE TABLE notification_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
notification_type VARCHAR(50) NOT NULL
CHECK (notification_type IN ('github_status', 'gitea_status', 'forgejo_status', 'gitlab_status', 'run_command', 'email')),
config JSONB NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, notification_type)
);
-- Jobset inputs for multi-input support
CREATE TABLE jobset_inputs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
input_type VARCHAR(50) NOT NULL
CHECK (input_type IN ('git', 'string', 'boolean', 'path', 'build')),
value TEXT NOT NULL,
revision TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(jobset_id, name)
);
-- Track flake mode per jobset
ALTER TABLE jobsets ADD COLUMN flake_mode BOOLEAN NOT NULL DEFAULT true;
ALTER TABLE jobsets ADD COLUMN check_interval INTEGER NOT NULL DEFAULT 60;
-- Store the flake URI or legacy expression path in nix_expression (already exists)
-- For flake mode: nix_expression = "github:owner/repo" or "."
-- For legacy mode: nix_expression = "release.nix"
-- Indexes for new columns
CREATE INDEX idx_builds_priority ON builds(priority DESC, created_at ASC);
CREATE INDEX idx_builds_notification_pending ON builds(notification_pending_since) WHERE notification_pending_since IS NOT NULL;
CREATE INDEX idx_api_keys_key_hash ON api_keys(key_hash);
CREATE INDEX idx_webhook_configs_project ON webhook_configs(project_id);
CREATE INDEX idx_notification_configs_project ON notification_configs(project_id);
CREATE INDEX idx_jobset_inputs_jobset ON jobset_inputs(jobset_id);
-- Update active_jobsets view to include flake_mode
-- Must DROP first: adding columns to jobsets changes j.* expansion,
-- and CREATE OR REPLACE VIEW cannot rename existing columns.
DROP VIEW IF EXISTS active_jobsets;
CREATE VIEW active_jobsets AS
SELECT
j.*,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.enabled = true;
-- Update list_pending to respect priority ordering
-- (handled in application code, but index above supports it)

View file

@ -1,14 +0,0 @@
ALTER TABLE builds ADD COLUMN outputs JSONB;
ALTER TABLE builds ADD COLUMN is_aggregate BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE builds ADD COLUMN constituents JSONB;
CREATE TABLE build_dependencies (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
dependency_build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
UNIQUE(build_id, dependency_build_id)
);
CREATE INDEX idx_build_deps_build ON build_dependencies(build_id);
CREATE INDEX idx_build_deps_dep ON build_dependencies(dependency_build_id);
CREATE INDEX idx_builds_drv_path ON builds(drv_path);

View file

@ -1,44 +0,0 @@
-- Channels for release management (like Hydra channels)
-- A channel tracks the latest "good" evaluation for a jobset
CREATE TABLE channels (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
current_evaluation_id UUID REFERENCES evaluations(id),
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, name)
);
-- Remote builders for multi-machine / multi-arch builds
CREATE TABLE remote_builders (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL UNIQUE,
ssh_uri TEXT NOT NULL,
systems TEXT[] NOT NULL DEFAULT '{}',
max_jobs INTEGER NOT NULL DEFAULT 1,
speed_factor INTEGER NOT NULL DEFAULT 1,
supported_features TEXT[] NOT NULL DEFAULT '{}',
mandatory_features TEXT[] NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
public_host_key TEXT,
ssh_key_file TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Track input hash for evaluation caching (skip re-eval when inputs unchanged)
ALTER TABLE evaluations ADD COLUMN inputs_hash VARCHAR(128);
-- Track which remote builder was used for a build
ALTER TABLE builds ADD COLUMN builder_id UUID REFERENCES remote_builders(id);
-- Track whether build outputs have been signed
ALTER TABLE builds ADD COLUMN signed BOOLEAN NOT NULL DEFAULT false;
-- Indexes
CREATE INDEX idx_channels_project ON channels(project_id);
CREATE INDEX idx_channels_jobset ON channels(jobset_id);
CREATE INDEX idx_remote_builders_enabled ON remote_builders(enabled) WHERE enabled = true;
CREATE INDEX idx_evaluations_inputs_hash ON evaluations(jobset_id, inputs_hash);
CREATE INDEX idx_builds_builder ON builds(builder_id) WHERE builder_id IS NOT NULL;

View file

@ -1,14 +0,0 @@
-- Hardening: indexes for performance
-- Cache lookup index (prefix match on path)
CREATE INDEX IF NOT EXISTS idx_build_products_path_prefix ON build_products (path text_pattern_ops);
-- Composite index for pending builds query
CREATE INDEX IF NOT EXISTS idx_builds_pending_priority ON builds (status, priority DESC, created_at ASC)
WHERE status = 'pending';
-- System filtering index
CREATE INDEX IF NOT EXISTS idx_builds_system ON builds(system) WHERE system IS NOT NULL;
-- Deduplication lookup by drv_path + status
CREATE INDEX IF NOT EXISTS idx_builds_drv_completed ON builds(drv_path) WHERE status = 'completed';

View file

@ -1,3 +0,0 @@
-- Multi-branch evaluation and scheduling shares
ALTER TABLE jobsets ADD COLUMN IF NOT EXISTS branch VARCHAR(255) DEFAULT NULL;
ALTER TABLE jobsets ADD COLUMN IF NOT EXISTS scheduling_shares INTEGER NOT NULL DEFAULT 100;

View file

@ -1,72 +0,0 @@
-- Migration 008: User Management Core
-- Adds user accounts, starred jobs, sessions, and project membership tables.

-- User accounts for authentication and personalization
CREATE TABLE users (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    username VARCHAR(255) NOT NULL UNIQUE,
    email VARCHAR(255) NOT NULL UNIQUE,
    full_name VARCHAR(255),
    password_hash VARCHAR(255), -- NULL for OAuth-only users
    user_type VARCHAR(50) NOT NULL DEFAULT 'local', -- 'local', 'github', 'google'
    role VARCHAR(50) NOT NULL DEFAULT 'read-only',
    enabled BOOLEAN NOT NULL DEFAULT true,
    email_verified BOOLEAN NOT NULL DEFAULT false,
    public_dashboard BOOLEAN NOT NULL DEFAULT false,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    last_login_at TIMESTAMP WITH TIME ZONE
);
-- Link API keys to users for audit trail; ON DELETE SET NULL keeps a key
-- usable (but unattributed) after its owner account is removed
ALTER TABLE api_keys ADD COLUMN user_id UUID REFERENCES users(id) ON DELETE SET NULL;
-- Starred jobs for personalized dashboard
CREATE TABLE starred_jobs (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
    jobset_id UUID REFERENCES jobsets(id) ON DELETE CASCADE,
    job_name VARCHAR(255) NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    -- NOTE(review): jobset_id is nullable, and PostgreSQL treats NULLs as
    -- distinct in UNIQUE constraints, so duplicate "all-jobsets" stars are
    -- not prevented — confirm whether a partial unique index is needed.
    UNIQUE(user_id, project_id, jobset_id, job_name)
);
-- User sessions for persistent authentication across restarts
CREATE TABLE user_sessions (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    session_token_hash VARCHAR(255) NOT NULL, -- Hashed session token
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    last_used_at TIMESTAMP WITH TIME ZONE
);
-- Project membership for per-project permissions
CREATE TABLE project_members (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    role VARCHAR(50) NOT NULL DEFAULT 'member', -- 'member', 'maintainer', 'admin'
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    UNIQUE(project_id, user_id)
);
-- Indexes for performance.
-- users(username) and users(email) need no extra indexes: the UNIQUE
-- constraints above already create unique btree indexes on those columns.
CREATE INDEX idx_users_role ON users(role);
CREATE INDEX idx_users_enabled ON users(enabled);
CREATE INDEX idx_api_keys_user_id ON api_keys(user_id);
CREATE INDEX idx_starred_jobs_user_id ON starred_jobs(user_id);
CREATE INDEX idx_starred_jobs_project_id ON starred_jobs(project_id);
-- NOTE(review): consider UNIQUE here — duplicate token hashes would make
-- session lookup ambiguous; confirm against the auth code.
CREATE INDEX idx_user_sessions_token ON user_sessions(session_token_hash);
CREATE INDEX idx_user_sessions_user_id ON user_sessions(user_id);
CREATE INDEX idx_user_sessions_expires ON user_sessions(expires_at);
CREATE INDEX idx_project_members_project_id ON project_members(project_id);
CREATE INDEX idx_project_members_user_id ON project_members(user_id);
-- Keep updated_at current on every users row update
CREATE TRIGGER update_users_updated_at
    BEFORE UPDATE ON users
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

View file

@ -1,2 +0,0 @@
-- Add index on builds.job_name for ILIKE queries in list_filtered
-- NOTE(review): a plain btree index cannot serve case-insensitive or infix
-- ILIKE patterns ('%foo%'); it helps only equality and (in "C" collation)
-- prefix LIKE. If list_filtered really issues ILIKE '%…%', a pg_trgm GIN
-- index would be required — confirm the actual query shape.
CREATE INDEX IF NOT EXISTS idx_builds_job_name ON builds (job_name);

View file

@ -1,12 +0,0 @@
-- Pull-request tracking on evaluations.
-- Enables PR-based CI workflows for GitHub/GitLab/Gitea.
-- All four PR columns are added in a single multi-action ALTER TABLE.
ALTER TABLE evaluations
    ADD COLUMN pr_number INTEGER,
    ADD COLUMN pr_head_branch TEXT,
    ADD COLUMN pr_base_branch TEXT,
    ADD COLUMN pr_action TEXT;
-- Partial composite index for efficient PR queries; rows without a PR
-- (pr_number IS NULL) are excluded so non-PR evaluations cost nothing.
CREATE INDEX idx_evaluations_pr
    ON evaluations (jobset_id, pr_number)
    WHERE pr_number IS NOT NULL;

View file

@ -1,39 +0,0 @@
-- Migration: Add jobset states for Hydra-compatible scheduling
-- Supports 4 states: disabled, enabled, one_shot, one_at_a_time
-- Add state column with CHECK constraint
ALTER TABLE jobsets ADD COLUMN state VARCHAR(50) NOT NULL DEFAULT 'enabled'
CHECK (state IN ('disabled', 'enabled', 'one_shot', 'one_at_a_time'));
-- Migrate existing data based on enabled column
-- (state is derived from the legacy boolean 'enabled', which is kept and
-- still exposed by the view below)
UPDATE jobsets SET state = CASE WHEN enabled THEN 'enabled' ELSE 'disabled' END;
-- Add last_checked_at for per-jobset interval tracking
ALTER TABLE jobsets ADD COLUMN last_checked_at TIMESTAMP WITH TIME ZONE;
-- Drop and recreate active_jobsets view to include new columns
-- (CREATE OR REPLACE VIEW cannot add columns, hence DROP + CREATE)
DROP VIEW IF EXISTS active_jobsets;
CREATE VIEW active_jobsets AS
SELECT
j.id,
j.project_id,
j.name,
j.nix_expression,
j.enabled,
j.flake_mode,
j.check_interval,
j.branch,
j.scheduling_shares,
j.created_at,
j.updated_at,
j.state,
j.last_checked_at,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
-- Every state except 'disabled' counts as active / eligible for evaluation
WHERE j.state IN ('enabled', 'one_shot', 'one_at_a_time');
-- Indexes for efficient queries
CREATE INDEX idx_jobsets_state ON jobsets(state);
CREATE INDEX idx_jobsets_last_checked_at ON jobsets(last_checked_at);

View file

@ -1,45 +0,0 @@
-- Migration: Add build metrics collection
-- Stores timing, size, and performance metrics for builds
-- Create build_metrics table
-- One row per (build, metric) pair; values are raw doubles tagged with a
-- free-form unit string rather than typed columns.
CREATE TABLE build_metrics (
-- NOTE(review): gen_random_uuid() is used here while other tables use
-- uuid_generate_v4() from uuid-ossp — works on PG13+ (built-in) or with
-- pgcrypto, but consider unifying on one generator.
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
metric_name VARCHAR(100) NOT NULL,
metric_value DOUBLE PRECISION NOT NULL,
unit VARCHAR(50) NOT NULL,
collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Index for efficient lookups by build
CREATE INDEX idx_build_metrics_build_id ON build_metrics(build_id);
-- Index for time-based queries (alerting)
CREATE INDEX idx_build_metrics_collected_at ON build_metrics(collected_at);
-- Index for metric name filtering
CREATE INDEX idx_build_metrics_name ON build_metrics(metric_name);
-- Prevent duplicate metrics for same build+name
-- (the backing unique index also serves (build_id, metric_name) lookups)
ALTER TABLE build_metrics ADD CONSTRAINT unique_build_metric_name UNIQUE (build_id, metric_name);
-- Create view for aggregate build statistics
-- Pivots the per-metric rows into one row per build via MAX(CASE ...);
-- all non-aggregated columns are listed explicitly in GROUP BY.
CREATE VIEW build_metrics_summary AS
SELECT
b.id as build_id,
b.job_name,
b.status,
b.system,
e.jobset_id,
j.project_id,
b.started_at,
b.completed_at,
EXTRACT(EPOCH FROM (b.completed_at - b.started_at)) as duration_seconds,
MAX(CASE WHEN bm.metric_name = 'output_size_bytes' THEN bm.metric_value END) as output_size_bytes,
MAX(CASE WHEN bm.metric_name = 'peak_memory_bytes' THEN bm.metric_value END) as peak_memory_bytes,
MAX(CASE WHEN bm.metric_name = 'nar_size_bytes' THEN bm.metric_value END) as nar_size_bytes
FROM builds b
JOIN evaluations e ON b.evaluation_id = e.id
JOIN jobsets j ON e.jobset_id = j.id
-- LEFT JOIN so builds with no metrics still appear (metric columns NULL)
LEFT JOIN build_metrics bm ON b.id = bm.build_id
GROUP BY b.id, b.job_name, b.status, b.system, e.jobset_id, j.project_id, b.started_at, b.completed_at;

View file

@ -1,26 +0,0 @@
-- Extended build status codes to match Hydra
-- Replace the builds.status CHECK constraint with the full status set.
-- IF EXISTS keeps the drop idempotent if the constraint is absent or renamed.
ALTER TABLE builds DROP CONSTRAINT IF EXISTS builds_status_check;
-- Rows still carrying the legacy 'completed' status would fail validation
-- when the new CHECK is added (ADD CONSTRAINT validates existing rows), so
-- migrate them first. Idempotent; a later migration repeats this rename when
-- fixing the build_stats view.
UPDATE builds SET status = 'succeeded' WHERE status = 'completed';
ALTER TABLE builds ADD CONSTRAINT builds_status_check CHECK (
    status IN (
        'pending',
        'running',
        'succeeded',
        'failed',
        'dependency_failed',
        'aborted',
        'cancelled',
        'failed_with_output',
        'timeout',
        'cached_failure',
        'unsupported_system',
        'log_limit_exceeded',
        'nar_size_limit_exceeded',
        'non_deterministic'
    )
);
-- Add index on status for faster filtering
CREATE INDEX IF NOT EXISTS idx_builds_status ON builds(status);

View file

@ -1,17 +0,0 @@
-- Fix build_stats view and data after 'completed' -> 'succeeded' status rename
-- Migrate any existing builds still using the old status value
UPDATE builds SET status = 'succeeded' WHERE status = 'completed';
-- Recreate the build_stats view to reference the new status
DROP VIEW IF EXISTS build_stats;
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
-- NOTE(review): the alias remains "completed_builds" although it now counts
-- 'succeeded' rows — presumably kept for API/dashboard compatibility; confirm.
COUNT(CASE WHEN status = 'succeeded' THEN 1 END) as completed_builds,
COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_builds,
COUNT(CASE WHEN status = 'running' THEN 1 END) as running_builds,
COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_builds,
-- AVG skips NULL completed_at, so in-flight builds don't skew the average
AVG(EXTRACT(EPOCH FROM (completed_at - started_at))) as avg_duration_seconds
FROM builds
WHERE started_at IS NOT NULL;

View file

@ -1,61 +0,0 @@
-- PostgreSQL LISTEN/NOTIFY triggers for event-driven reactivity
-- Emits notifications on builds/jobsets mutations so daemons can wake immediately
-- Trigger function: notify on builds changes
-- Payload carries only the operation and table name (no row id); listeners
-- are expected to re-query the table for actual state.
CREATE OR REPLACE FUNCTION notify_builds_changed() RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_builds_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
-- Return value of an AFTER trigger is ignored; NULL is conventional
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Trigger function: notify on jobsets changes
CREATE OR REPLACE FUNCTION notify_jobsets_changed() RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_jobsets_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Builds: new build inserted (queue-runner should wake)
CREATE TRIGGER trg_builds_insert_notify
AFTER INSERT ON builds
FOR EACH ROW
EXECUTE FUNCTION notify_builds_changed();
-- Builds: status changed (queue-runner should re-check, e.g. deps resolved)
-- IS DISTINCT FROM is NULL-safe, so NULL <-> value transitions also fire
CREATE TRIGGER trg_builds_status_notify
AFTER UPDATE ON builds
FOR EACH ROW
WHEN (OLD.status IS DISTINCT FROM NEW.status)
EXECUTE FUNCTION notify_builds_changed();
-- Jobsets: new jobset created (evaluator should wake)
CREATE TRIGGER trg_jobsets_insert_notify
AFTER INSERT ON jobsets
FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed();
-- Jobsets: relevant fields changed (evaluator should re-check)
-- The WHEN clause suppresses notifications for updates that touch only
-- fields the evaluator does not care about (e.g. last_checked_at).
CREATE TRIGGER trg_jobsets_update_notify
AFTER UPDATE ON jobsets
FOR EACH ROW
WHEN (
OLD.enabled IS DISTINCT FROM NEW.enabled
OR OLD.state IS DISTINCT FROM NEW.state
OR OLD.nix_expression IS DISTINCT FROM NEW.nix_expression
OR OLD.check_interval IS DISTINCT FROM NEW.check_interval
)
EXECUTE FUNCTION notify_jobsets_changed();
-- Jobsets: deleted (evaluator should wake to stop tracking)
CREATE TRIGGER trg_jobsets_delete_notify
AFTER DELETE ON jobsets
FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed();

View file

@ -1,9 +0,0 @@
-- Failed paths cache: prevents rebuilding known-failing derivations
-- Keyed by derivation path; a hit lets the scheduler skip re-running a
-- derivation that already failed.
CREATE TABLE failed_paths_cache (
drv_path TEXT PRIMARY KEY,
-- NOTE(review): no FK to builds(id) — presumably deliberate so cache entries
-- survive deletion/GC of the originating build row; confirm.
source_build_id UUID,
failure_status TEXT,
failed_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Supports time-based expiry/cleanup of stale cache entries
CREATE INDEX idx_failed_paths_cache_failed_at ON failed_paths_cache(failed_at);

View file

@ -1,32 +0,0 @@
-- GC pinning (#11)
-- builds.keep: pin a build so garbage collection never removes its outputs
ALTER TABLE builds ADD COLUMN IF NOT EXISTS keep BOOLEAN NOT NULL DEFAULT false;
-- jobsets.keep_nr: retention count per jobset (presumably "keep outputs of
-- the N most recent evaluations", matching Hydra's keepnr — confirm against
-- the GC implementation)
ALTER TABLE jobsets ADD COLUMN IF NOT EXISTS keep_nr INTEGER NOT NULL DEFAULT 3;
-- Recreate active_jobsets view to include keep_nr
-- (DROP + CREATE because CREATE OR REPLACE VIEW cannot add columns)
DROP VIEW IF EXISTS active_jobsets;
CREATE VIEW active_jobsets AS
SELECT
j.id,
j.project_id,
j.name,
j.nix_expression,
j.enabled,
j.flake_mode,
j.check_interval,
j.branch,
j.scheduling_shares,
j.created_at,
j.updated_at,
j.state,
j.last_checked_at,
j.keep_nr,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.state IN ('enabled', 'one_shot', 'one_at_a_time');
-- Machine health tracking (#5)
-- consecutive_failures and disabled_until presumably implement failure
-- backoff (skip a builder until disabled_until passes) — confirm against
-- the dispatcher code
ALTER TABLE remote_builders ADD COLUMN IF NOT EXISTS consecutive_failures INTEGER NOT NULL DEFAULT 0;
ALTER TABLE remote_builders ADD COLUMN IF NOT EXISTS disabled_until TIMESTAMP WITH TIME ZONE;
ALTER TABLE remote_builders ADD COLUMN IF NOT EXISTS last_failure TIMESTAMP WITH TIME ZONE;

View file

@ -4,8 +4,9 @@ This directory contains SQL migrations for the FC database.
## Migration Files
- `001_initial_schema.sql`: Creates the core database schema including projects,
jobsets, evaluations, builds, and related tables.
- `0001_schema.sql`: Full schema, all tables, indexes, triggers, and views.
- `0002_example.sql`: Placeholder stub illustrating the format of the next
  migration; it will gain real content once a stable release is cut.
## Running Migrations
@ -22,5 +23,3 @@ fc-migrate validate postgresql://user:password@localhost/fc_ci
# Create a new migration
fc-migrate create migration_name
```
TODO: add (or auto-generate from the SQL) an overview document describing the
schema's tables, views, and triggers.

View file

@ -131,7 +131,7 @@ impl std::fmt::Debug for GitHubOAuthConfig {
#[serde(default)]
#[derive(Default)]
pub struct NotificationsConfig {
pub run_command: Option<String>,
pub webhook_url: Option<String>,
pub github_token: Option<String>,
pub gitea_url: Option<String>,
pub gitea_token: Option<String>,
@ -304,8 +304,8 @@ pub struct DeclarativeProject {
/// Declarative notification configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeclarativeNotification {
/// Notification type: `github_status`, email, `gitlab_status`,
/// `gitea_status`, `run_command`
/// Notification type: `github_status`, `email`, `gitlab_status`,
/// `gitea_status`, `webhook`
pub notification_type: String,
/// Type-specific configuration (JSON object)
pub config: serde_json::Value,

View file

@ -23,9 +23,9 @@ pub async fn dispatch_build_finished(
commit_hash: &str,
config: &NotificationsConfig,
) {
// 1. Run command notification
if let Some(ref cmd) = config.run_command {
run_command_notification(cmd, build, project).await;
// 1. Generic webhook notification
if let Some(ref url) = config.webhook_url {
webhook_notification(url, build, project, commit_hash).await;
}
// 2. GitHub commit status
@ -56,7 +56,12 @@ pub async fn dispatch_build_finished(
}
}
async fn run_command_notification(cmd: &str, build: &Build, project: &Project) {
async fn webhook_notification(
url: &str,
build: &Build,
project: &Project,
commit_hash: &str,
) {
let status_str = match build.status {
BuildStatus::Succeeded | BuildStatus::CachedFailure => "success",
BuildStatus::Failed
@ -72,32 +77,29 @@ async fn run_command_notification(cmd: &str, build: &Build, project: &Project) {
BuildStatus::Pending | BuildStatus::Running => "pending",
};
let result = tokio::process::Command::new("sh")
.arg("-c")
.arg(cmd)
.env("FC_BUILD_ID", build.id.to_string())
.env("FC_BUILD_STATUS", status_str)
.env("FC_BUILD_JOB", &build.job_name)
.env("FC_BUILD_DRV", &build.drv_path)
.env("FC_PROJECT_NAME", &project.name)
.env("FC_PROJECT_URL", &project.repository_url)
.env(
"FC_BUILD_OUTPUT",
build.build_output_path.as_deref().unwrap_or(""),
)
.output()
.await;
let payload = serde_json::json!({
"build_id": build.id,
"build_status": status_str,
"build_job": build.job_name,
"build_drv": build.drv_path,
"build_output": build.build_output_path.as_deref().unwrap_or(""),
"project_name": project.name,
"project_url": project.repository_url,
"commit_hash": commit_hash,
});
match result {
Ok(output) => {
if output.status.success() {
info!(build_id = %build.id, "RunCommand completed successfully");
} else {
let stderr = String::from_utf8_lossy(&output.stderr);
warn!(build_id = %build.id, "RunCommand failed: {stderr}");
}
match http_client().post(url).json(&payload).send().await {
Ok(resp) if resp.status().is_success() => {
info!(build_id = %build.id, "Webhook notification sent");
},
Err(e) => error!(build_id = %build.id, "RunCommand execution failed: {e}"),
Ok(resp) => {
warn!(
build_id = %build.id,
status = %resp.status(),
"Webhook notification rejected"
);
},
Err(e) => error!(build_id = %build.id, "Webhook notification failed: {e}"),
}
}