fc-common: add build_metrics table and repository

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: If6842311f49bfcba9e1b11fa8bc9748c6a6a6964
This commit is contained in:
raf 2026-02-08 21:36:19 +03:00
commit f8f9703faa
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
4 changed files with 126 additions and 0 deletions

View file

@ -0,0 +1,45 @@
-- Migration: Add build metrics collection
-- Stores timing, size, and performance metrics for builds

-- Create build_metrics table.
-- The uniqueness rule is declared inline; its backing b-tree index on
-- (build_id, metric_name) also serves lookups by build_id alone via the
-- leftmost-prefix rule, so a separate index on build_id would be redundant.
CREATE TABLE build_metrics (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Metrics are removed together with their build.
    build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
    metric_name VARCHAR(100) NOT NULL,
    metric_value DOUBLE PRECISION NOT NULL,
    unit VARCHAR(50) NOT NULL,
    collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    -- Prevent duplicate metrics for same build+name; this is the arbiter
    -- for the repository's ON CONFLICT (build_id, metric_name) upsert.
    CONSTRAINT unique_build_metric_name UNIQUE (build_id, metric_name)
);
-- Index for time-based queries (alerting)
CREATE INDEX idx_build_metrics_collected_at ON build_metrics(collected_at);
-- Index for metric name filtering
CREATE INDEX idx_build_metrics_name ON build_metrics(metric_name);
-- Create view for aggregate build statistics.
-- One row per build: identity, timing, and the well-known size/memory
-- metrics pivoted from build_metrics into columns (NULL when absent).
CREATE VIEW build_metrics_summary AS
SELECT
    b.id AS build_id,
    b.job_name,
    b.status,
    b.system,
    e.jobset_id,
    j.project_id,
    b.started_at,
    b.completed_at,
    EXTRACT(EPOCH FROM (b.completed_at - b.started_at)) AS duration_seconds,
    MAX(bm.metric_value) FILTER (WHERE bm.metric_name = 'output_size_bytes') AS output_size_bytes,
    MAX(bm.metric_value) FILTER (WHERE bm.metric_name = 'peak_memory_bytes') AS peak_memory_bytes,
    MAX(bm.metric_value) FILTER (WHERE bm.metric_name = 'nar_size_bytes') AS nar_size_bytes
FROM builds b
JOIN evaluations e ON b.evaluation_id = e.id
JOIN jobsets j ON e.jobset_id = j.id
LEFT JOIN build_metrics bm ON b.id = bm.build_id
GROUP BY b.id, b.job_name, b.status, b.system, e.jobset_id, j.project_id, b.started_at, b.completed_at;

View file

@ -164,6 +164,26 @@ pub struct BuildDependency {
pub dependency_build_id: Uuid, pub dependency_build_id: Uuid,
} }
/// A single named measurement recorded for a build — one row of the
/// `build_metrics` table, unique per `(build_id, metric_name)`.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct BuildMetric {
    // Row primary key (DB default gen_random_uuid()).
    pub id: Uuid,
    // Build this metric belongs to; row is deleted with the build (CASCADE).
    pub build_id: Uuid,
    // Metric identifier, e.g. one of the `metric_names` constants.
    pub metric_name: String,
    // Measured value; interpretation depends on `unit`.
    pub metric_value: f64,
    // Unit string, e.g. one of the `metric_units` constants.
    pub unit: String,
    // When the metric was recorded (DB default NOW(); refreshed on upsert).
    pub collected_at: DateTime<Utc>,
}
/// Well-known `metric_name` values stored in `build_metrics`.
///
/// Kept in sync with the names the `build_metrics_summary` view pivots
/// into columns (`output_size_bytes`, `peak_memory_bytes`,
/// `nar_size_bytes`).
pub mod metric_names {
    /// Wall-clock build duration, in seconds.
    pub const BUILD_DURATION_SECONDS: &str = "build_duration_seconds";
    /// Total size of the build outputs, in bytes.
    pub const OUTPUT_SIZE_BYTES: &str = "output_size_bytes";
    /// Peak memory observed during the build, in bytes.
    pub const PEAK_MEMORY_BYTES: &str = "peak_memory_bytes";
    /// Size of the NAR archive, in bytes.
    pub const NAR_SIZE_BYTES: &str = "nar_size_bytes";
}
/// Canonical `unit` strings for `build_metrics` rows.
pub mod metric_units {
    /// Unit for size metrics.
    pub const BYTES: &str = "bytes";
    /// Unit for duration metrics.
    pub const SECONDS: &str = "seconds";
}
/// Active jobset view — enabled jobsets joined with project info. /// Active jobset view — enabled jobsets joined with project info.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] #[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct ActiveJobset { pub struct ActiveJobset {

View file

@ -0,0 +1,60 @@
use sqlx::PgPool;
use uuid::Uuid;
use crate::{
error::{CiError, Result},
models::BuildMetric,
};
/// Insert a metric for `build_id`, or update it in place when a metric
/// with the same name already exists for that build (upsert keyed on the
/// `(build_id, metric_name)` unique constraint).
///
/// On conflict the value *and* the unit are refreshed — previously a
/// re-reported metric kept its stale unit alongside the new value — and
/// `collected_at` is reset to NOW(), so the row always reflects the
/// latest observation. Returns the stored row.
///
/// # Errors
/// Returns [`CiError::Database`] if the query fails (e.g. `build_id`
/// does not reference an existing build).
pub async fn upsert(
    pool: &PgPool,
    build_id: Uuid,
    metric_name: &str,
    metric_value: f64,
    unit: &str,
) -> Result<BuildMetric> {
    sqlx::query_as::<_, BuildMetric>(
        "INSERT INTO build_metrics (build_id, metric_name, metric_value, unit) \
         VALUES ($1, $2, $3, $4) ON CONFLICT (build_id, metric_name) DO UPDATE \
         SET metric_value = EXCLUDED.metric_value, unit = EXCLUDED.unit, \
         collected_at = NOW() RETURNING *",
    )
    .bind(build_id)
    .bind(metric_name)
    .bind(metric_value)
    .bind(unit)
    .fetch_one(pool)
    .await
    .map_err(CiError::Database)
}
/// Percentage (0.0–100.0) of builds completed within the last
/// `window_minutes` whose status is 'Failed', optionally scoped to a
/// project and/or jobset (a `None` filter matches everything).
///
/// Returns 0.0 when no builds completed in the window.
///
/// NOTE(review): the failure check compares `status::text` against the
/// literal 'Failed' — confirm this matches the DB enum's casing.
///
/// # Errors
/// Returns [`CiError::Database`] if the query fails.
pub async fn calculate_failure_rate(
    pool: &PgPool,
    project_id: Option<Uuid>,
    jobset_id: Option<Uuid>,
    window_minutes: i64,
) -> Result<f64> {
    // Count in the database instead of shipping every matching row over
    // the wire just to count it in Rust; the previous ORDER BY did no
    // useful work for a pure count either.
    let (total, failed): (i64, i64) = sqlx::query_as(
        "SELECT COUNT(*), COUNT(*) FILTER (WHERE b.status::text = 'Failed') \
         FROM builds b JOIN evaluations e ON b.evaluation_id = e.id \
         JOIN jobsets j ON e.jobset_id = j.id WHERE \
         ($1::uuid IS NULL OR j.project_id = $1) AND ($2::uuid IS NULL OR j.id = \
         $2) AND b.completed_at > NOW() - (INTERVAL '1 minute' * $3)",
    )
    .bind(project_id)
    .bind(jobset_id)
    .bind(window_minutes)
    .fetch_one(pool)
    .await
    .map_err(CiError::Database)?;

    if total == 0 {
        return Ok(0.0);
    }
    Ok((failed as f64) / (total as f64) * 100.0)
}

View file

@ -1,5 +1,6 @@
pub mod api_keys; pub mod api_keys;
pub mod build_dependencies; pub mod build_dependencies;
pub mod build_metrics;
pub mod build_products; pub mod build_products;
pub mod build_steps; pub mod build_steps;
pub mod builds; pub mod builds;