nix: VM tests; demo VM; cleanup

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: If0fe78ab43436c7e85fa357483bb4c8c6a6a6964
This commit is contained in:
raf 2026-02-02 01:31:17 +03:00
commit 389bf71d82
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
10 changed files with 2882 additions and 63 deletions

12
flake.lock generated
View file

@ -2,11 +2,11 @@
"nodes": { "nodes": {
"crane": { "crane": {
"locked": { "locked": {
"lastModified": 1760924934, "lastModified": 1769737823,
"narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=", "narHash": "sha256-DrBaNpZ+sJ4stXm+0nBX7zqZT9t9P22zbk6m5YhQxS4=",
"owner": "ipetkov", "owner": "ipetkov",
"repo": "crane", "repo": "crane",
"rev": "c6b4d5308293d0d04fcfeee92705017537cad02f", "rev": "b2f45c3830aa96b7456a4c4bc327d04d7a43e1ba",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -17,11 +17,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1761880412, "lastModified": 1769740369,
"narHash": "sha256-QoJjGd4NstnyOG4mm4KXF+weBzA2AH/7gn1Pmpfcb0A=", "narHash": "sha256-xKPyJoMoXfXpDM5DFDZDsi9PHArf2k5BJjvReYXoFpM=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "a7fc11be66bdfb5cdde611ee5ce381c183da8386", "rev": "6308c3b21396534d8aaeac46179c14c439a89b8a",
"type": "github" "type": "github"
}, },
"original": { "original": {

118
flake.nix
View file

@ -1,71 +1,100 @@
{ {
inputs = { inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
crane.url = "github:ipetkov/crane"; crane.url = "github:ipetkov/crane";
}; };
outputs = { outputs = {
nixpkgs, nixpkgs,
crane, crane,
self,
... ...
}: let }: let
# FIXME: allow multi-system when I can be arsed to write the abstractions inherit (nixpkgs) lib;
system = "x86_64-linux"; forAllSystems = lib.genAttrs ["x86_64-linux" "aarch64-linux"];
in {
# NixOS module for feel-ci
nixosModules = {
fc-ci = ./nix/modules/nixos.nix;
default = self.nixosModules.fc-ci;
};
packages = forAllSystems (system: let
pkgs = nixpkgs.legacyPackages.${system}; pkgs = nixpkgs.legacyPackages.${system};
craneLib = crane.mkLib pkgs; craneLib = crane.mkLib pkgs;
src = craneLib.cleanCargoSource ./.;
src = let
fs = lib.fileset;
s = ./.;
in
fs.toSource {
root = s;
fileset = fs.unions [
(s + /crates)
(s + /Cargo.lock)
(s + /Cargo.toml)
];
};
commonArgs = { commonArgs = {
pname = "feel-ci"; pname = "feel-ci";
inherit src; inherit src;
strictDeps = true; strictDeps = true;
nativeBuildInputs = with pkgs; [pkg-config];
buildInputs = with pkgs; [openssl];
}; };
cargoArtifacts = craneLib.buildDepsOnly commonArgs; cargoArtifacts = craneLib.buildDepsOnly commonArgs;
# Build individual workspace members
server = craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "server";
cargoExtraArgs = "--package server";
});
evaluator = craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "evaluator";
cargoExtraArgs = "--package evaluator";
});
queue-runner = craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "queue-runner";
cargoExtraArgs = "--package queue-runner";
});
common = craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "common";
cargoExtraArgs = "--package common";
});
migrate-cli = craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "migrate-cli";
cargoExtraArgs = "--package migrate-cli";
});
in { in {
packages.${system} = { demo-vm = pkgs.callPackage ./nix/demo-vm.nix {
inherit server evaluator queue-runner common migrate-cli; nixosModule = self.nixosModules.default;
fc-packages = {
inherit (self.packages.${system}) fc-common fc-evaluator fc-migrate-cli fc-queue-runner fc-server;
};
}; };
devShells.${system}.default = craneLib.devShell { # FC Packages
fc-common = pkgs.callPackage ./nix/packages/fc-common.nix {
inherit craneLib commonArgs cargoArtifacts;
};
fc-evaluator = pkgs.callPackage ./nix/packages/fc-evaluator.nix {
inherit craneLib commonArgs cargoArtifacts;
};
fc-migrate-cli = pkgs.callPackage ./nix/packages/fc-migrate-cli.nix {
inherit craneLib commonArgs cargoArtifacts;
};
fc-queue-runner = pkgs.callPackage ./nix/packages/fc-queue-runner.nix {
inherit craneLib commonArgs cargoArtifacts;
};
fc-server = pkgs.callPackage ./nix/packages/fc-server.nix {
inherit craneLib commonArgs cargoArtifacts;
};
});
checks = forAllSystems (system: let
pkgs = nixpkgs.legacyPackages.${system};
in {
vm-test = pkgs.callPackage ./nix/vm-test.nix {
nixosModule = self.nixosModules.default;
fc-packages = {
inherit (self.packages.${system}) fc-common fc-evaluator fc-migrate-cli fc-queue-runner fc-server;
};
};
});
devShells = forAllSystems (system: let
pkgs = nixpkgs.legacyPackages.${system};
craneLib = crane.mkLib pkgs;
in {
default = craneLib.devShell {
name = "fc"; name = "fc";
inputsFrom = [server]; inputsFrom = [self.packages.${system}.fc-server];
strictDeps = true;
packages = with pkgs; [ packages = with pkgs; [
rust-analyzer rust-analyzer
postgresql postgresql
@ -73,5 +102,6 @@
openssl openssl
]; ];
}; };
});
}; };
} }

153
nix/demo-vm.nix Normal file
View file

@ -0,0 +1,153 @@
{
pkgs,
fc-packages,
nixosModule,
}: let
nixos = pkgs.nixos ({
modulesPath,
pkgs,
...
}: {
imports = [
nixosModule
(modulesPath + "/virtualisation/qemu-vm.nix")
];
## VM hardware
virtualisation = {
memorySize = 2048;
cores = 2;
diskSize = 4096;
graphics = false;
# Forward guest:3000 -> host:3000 so the dashboard is reachable
forwardPorts = [
{
from = "host";
host.port = 3000;
guest.port = 3000;
}
];
};
services.fc = {
enable = true;
package = fc-packages.fc-server;
evaluatorPackage = fc-packages.fc-evaluator;
queueRunnerPackage = fc-packages.fc-queue-runner;
migratePackage = fc-packages.fc-migrate-cli;
server.enable = true;
evaluator.enable = true;
queueRunner.enable = true;
settings = {
database.url = "postgresql:///fc?host=/run/postgresql";
gc.enabled = false;
logs.log_dir = "/var/lib/fc/logs";
cache.enabled = true;
signing.enabled = false;
server = {
# Bind to all interfaces so port forwarding works
host = "0.0.0.0";
port = 3000;
cors_permissive = true;
};
};
};
## Seed an admin API key on first boot
# Token: fc_demo_admin_key, SHA-256 hash inserted into api_keys
# A read-only key is also seeded for testing RBAC.
systemd.services.fc-seed-keys = {
description = "Seed demo API keys";
# Ordered and required after the server so migrations have created the
# api_keys table before we INSERT into it.
after = ["fc-server.service"];
requires = ["fc-server.service"];
wantedBy = ["multi-user.target"];
serviceConfig = {
# One-shot seeding; RemainAfterExit keeps the unit reported "active"
# after the script finishes.
Type = "oneshot";
RemainAfterExit = true;
# Runs as the service user so the peer-authenticated local socket
# connection (psql -U fc over /run/postgresql) works.
User = "fc";
Group = "fc";
};
path = [pkgs.postgresql pkgs.curl];
# NOTE(review): if the 30-iteration health poll times out, the script falls
# through and still runs psql; failures there are hidden by `|| true`, so
# seeding can silently no-op on a slow boot — confirm this is acceptable
# for a demo VM.
script = ''
# Wait for server to be ready
for i in $(seq 1 30); do
if curl -sf http://127.0.0.1:3000/health >/dev/null 2>&1; then
break
fi
sleep 1
done
# Admin key: fc_demo_admin_key
ADMIN_HASH="$(echo -n 'fc_demo_admin_key' | sha256sum | cut -d' ' -f1)"
psql -U fc -d fc -c "INSERT INTO api_keys (name, key_hash, role) VALUES ('demo-admin', '$ADMIN_HASH', 'admin') ON CONFLICT DO NOTHING" 2>/dev/null || true
# Read-only key: fc_demo_readonly_key
RO_HASH="$(echo -n 'fc_demo_readonly_key' | sha256sum | cut -d' ' -f1)"
psql -U fc -d fc -c "INSERT INTO api_keys (name, key_hash, role) VALUES ('demo-readonly', '$RO_HASH', 'read-only') ON CONFLICT DO NOTHING" 2>/dev/null || true
echo ""
echo "==========================================="
echo ""
echo " Dashboard: http://localhost:3000"
echo " Health: http://localhost:3000/health"
echo " API base: http://localhost:3000/api/v1"
echo ""
echo " Admin key: fc_demo_admin_key"
echo " Read-only key: fc_demo_readonly_key"
echo ""
echo " Login at http://localhost:3000/login"
echo " using the admin key above."
echo "==========================================="
'';
};
# --- Useful tools inside the VM ---
environment.systemPackages = with pkgs; [
curl
jq
htop
nix
nix-eval-jobs
git
zstd
];
# --- Misc VM settings ---
networking.hostName = "fc-demo";
networking.firewall.allowedTCPPorts = [3000];
services.getty.autologinUser = "root";
# Show a helpful MOTD
environment.etc."motd".text = ''
Dashboard: http://localhost:3000
API: http://localhost:3000/api/v1
Admin API key: fc_demo_admin_key
Read-only API key: fc_demo_readonly_key
Useful commands:
$ systemctl status fc-server
$ journalctl -u fc-server -f
$ curl -sf localhost:3000/health | jq
$ curl -sf localhost:3000/metrics
Press Ctrl-a x to quit QEMU.
'';
system.stateVersion = "26.11";
});
in
# Launcher wrapping the qemu runner script produced by qemu-vm.nix.
# NOTE(review): the inner binary name `run-fc-demo-vm` is derived by the
# qemu-vm module from networking.hostName ("fc-demo" above) — if the
# hostname changes, this exec path breaks; keep the two in sync.
pkgs.writeShellApplication {
name = "run-fc-demo-vm";
text = ''
echo "Starting FC CI demo VM..."
echo "Dashboard will be available at http://localhost:3000"
echo "Press Ctrl-a x to quit."
echo ""
exec ${nixos.config.system.build.vm}/bin/run-fc-demo-vm
'';
}

365
nix/modules/nixos.nix Normal file
View file

@ -0,0 +1,365 @@
{
config,
pkgs,
lib,
...
}: let
inherit (lib.options) mkOption mkEnableOption;
inherit (lib.types) bool str int package listOf submodule nullOr;
cfg = config.services.fc;
settingsFormat = pkgs.formats.toml {};
settingsType = settingsFormat.type;
# Build the final settings by merging declarative config into settings
# Merge declaratively-configured projects and API keys into the free-form
# `settings` attrset. The `declarative` table is only added when at least
# one project or key is defined, so plain setups emit no empty section.
finalSettings = lib.recursiveUpdate cfg.settings (lib.optionalAttrs (cfg.declarative.projects != [] || cfg.declarative.apiKeys != []) {
declarative = {
# Translate the module's camelCase options into the snake_case keys
# the fc.toml schema expects.
projects = map (p:
{
name = p.name;
repository_url = p.repositoryUrl;
jobsets = map (j: {
name = j.name;
nix_expression = j.nixExpression;
enabled = j.enabled;
flake_mode = j.flakeMode;
check_interval = j.checkInterval;
}) p.jobsets;
}
# TOML has no null value; emitting `description = null` would make the
# generator fail, so only include the key when a description is set.
// lib.optionalAttrs (p.description != null) {description = p.description;})
cfg.declarative.projects;
api_keys = map (k: {
name = k.name;
key = k.key;
role = k.role;
}) cfg.declarative.apiKeys;
};
});
# Rendered TOML consumed by all three services via FC_CONFIG_FILE.
# (The previous `inherit (builtins) map;` was dropped: `map` is already a
# global builtin in Nix, so the inherit was redundant.)
settingsFile = settingsFormat.generate "fc.toml" finalSettings;
jobsetOpts = {
options = {
name = mkOption {
type = str;
description = "Jobset name.";
};
nixExpression = mkOption {
type = str;
description = "Nix expression to evaluate (e.g. 'packages', 'checks', 'hydraJobs').";
};
enabled = mkOption {
type = bool;
default = true;
description = "Whether this jobset is enabled for evaluation.";
};
flakeMode = mkOption {
type = bool;
default = true;
description = "Whether to evaluate as a flake.";
};
checkInterval = mkOption {
type = int;
default = 60;
description = "Seconds between evaluation checks.";
};
};
};
projectOpts = {
options = {
name = mkOption {
type = str;
description = "Project name (unique identifier).";
};
repositoryUrl = mkOption {
type = str;
description = "Git repository URL.";
};
description = mkOption {
type = nullOr str;
default = null;
description = "Optional project description.";
};
jobsets = mkOption {
type = listOf (submodule jobsetOpts);
default = [];
description = "Jobsets to create for this project.";
};
};
};
apiKeyOpts = {
options = {
name = mkOption {
type = str;
description = "Human-readable name for this API key.";
};
key = mkOption {
type = str;
description = ''
The raw API key value (e.g. "fc_mykey123").
Will be hashed before storage. Consider using a secrets manager.
'';
};
role = mkOption {
type = str;
default = "admin";
description = "Role: admin, read-only, create-projects, eval-jobset, cancel-build, restart-jobs, bump-to-front.";
};
};
};
in {
options.services.fc = {
enable = mkEnableOption "FC CI system";
package = mkOption {
type = package;
description = "The FC server package.";
};
evaluatorPackage = mkOption {
type = package;
default = cfg.package;
description = "The FC evaluator package. Defaults to cfg.package.";
};
queueRunnerPackage = mkOption {
type = package;
default = cfg.package;
description = "The FC queue runner package. Defaults to cfg.package.";
};
migratePackage = mkOption {
type = package;
description = "The FC migration CLI package.";
};
settings = mkOption {
type = settingsType;
default = {};
description = ''
FC configuration as a Nix attribute set.
Will be converted to TOML and written to fc.toml.
'';
};
declarative = {
projects = mkOption {
type = listOf (submodule projectOpts);
default = [];
description = ''
Declarative project definitions. These are upserted on every
server startup, ensuring the database matches this configuration.
'';
example = lib.literalExpression ''
[
{
name = "my-project";
repositoryUrl = "https://github.com/user/repo";
description = "My Nix project";
jobsets = [
{ name = "packages"; nixExpression = "packages"; }
{ name = "checks"; nixExpression = "checks"; }
];
}
]
'';
};
apiKeys = mkOption {
type = listOf (submodule apiKeyOpts);
default = [];
description = ''
Declarative API key definitions. Keys are upserted on every
server startup. Use a secrets manager for production deployments.
'';
example = lib.literalExpression ''
[
{ name = "admin"; key = "fc_admin_secret"; role = "admin"; }
{ name = "ci-bot"; key = "fc_ci_bot_key"; role = "eval-jobset"; }
]
'';
};
};
database = {
createLocally = mkOption {
type = bool;
default = true;
description = "Whether to create the PostgreSQL database locally.";
};
};
server = {
enable = mkEnableOption "FC server (REST API)";
};
evaluator = {
enable = mkEnableOption "FC evaluator (git polling and nix evaluation)";
};
queueRunner = {
enable = mkEnableOption "FC queue runner (build dispatch)";
};
};
config = lib.mkIf cfg.enable {
users.users.fc = {
isSystemUser = true;
group = "fc";
home = "/var/lib/fc";
createHome = true;
};
users.groups.fc = {};
services.postgresql = lib.mkIf cfg.database.createLocally {
enable = true;
ensureDatabases = ["fc"];
ensureUsers = [
{
name = "fc";
ensureDBOwnership = true;
}
];
};
services.fc.settings = lib.mkDefault {
database.url = "postgresql:///fc?host=/run/postgresql";
server.host = "127.0.0.1";
server.port = 3000;
gc.gc_roots_dir = "/nix/var/nix/gcroots/per-user/fc/fc-roots";
gc.enabled = true;
gc.max_age_days = 30;
gc.cleanup_interval = 3600;
logs.log_dir = "/var/lib/fc/logs";
cache.enabled = true;
evaluator.restrict_eval = true;
evaluator.allow_ifd = false;
signing.enabled = false;
};
# Conditionally create the log dir and the gcroots parent dir.
# NOTE: `lib.mkIf` is only resolved at the option-definition level — inside
# a list element it leaves an unresolved `{ _type = "if"; ... }` attrset
# that the `listOf str` type of tmpfiles.rules rejects. Use lib.optional
# so disabled entries are simply omitted from the list.
systemd.tmpfiles.rules =
lib.optional cfg.server.enable "d /var/lib/fc/logs 0750 fc fc -"
++ lib.optional cfg.queueRunner.enable "d /nix/var/nix/gcroots/per-user/fc 0755 fc fc -";
# REST API server unit: migrates the database on every start, then serves.
systemd.services.fc-server = lib.mkIf cfg.server.enable {
description = "FC CI Server";
wantedBy = ["multi-user.target"];
# NOTE(review): ordering against `postgresql.target` relies on the
# provisioning target added in recent nixpkgs (reached after
# ensureDatabases/ensureUsers) — confirm the pinned nixpkgs provides it.
after = ["network.target"] ++ lib.optional cfg.database.createLocally "postgresql.target";
requires = lib.optional cfg.database.createLocally "postgresql.target";
# nix and zstd must be on PATH for the server binary to shell out to.
path = with pkgs; [nix zstd];
serviceConfig = {
# Run pending migrations before the server starts; falls back to the
# local-socket URL when settings carry no explicit database.url.
ExecStartPre = "${cfg.migratePackage}/bin/fc-migrate up ${finalSettings.database.url or "postgresql:///fc?host=/run/postgresql"}";
ExecStart = "${cfg.package}/bin/fc-server";
Restart = "on-failure";
RestartSec = 5;
User = "fc";
Group = "fc";
StateDirectory = "fc";
LogsDirectory = "fc";
WorkingDirectory = "/var/lib/fc";
# Explicit write access under ProtectSystem=strict.
ReadWritePaths = ["/var/lib/fc"];
# Hardening
ProtectSystem = "strict";
ProtectHome = true;
NoNewPrivileges = true;
PrivateTmp = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
};
environment = {
# Generated TOML config with declarative settings merged in.
FC_CONFIG_FILE = "${settingsFile}";
};
};
# Evaluator unit: polls git repositories and evaluates jobsets.
systemd.services.fc-evaluator = lib.mkIf cfg.evaluator.enable {
description = "FC CI Evaluator";
wantedBy = ["multi-user.target"];
after = ["network.target" "fc-server.service"] ++ lib.optional cfg.database.createLocally "postgresql.target";
# Hard dependency on the server so its ExecStartPre migrations have run
# before the evaluator touches the database.
requires = ["fc-server.service"] ++ lib.optional cfg.database.createLocally "postgresql.target";
# Tools the evaluator shells out to: nix, git checkouts, nix-eval-jobs.
path = with pkgs; [
nix
git
nix-eval-jobs
];
serviceConfig = {
ExecStart = "${cfg.evaluatorPackage}/bin/fc-evaluator";
Restart = "on-failure";
RestartSec = 10;
User = "fc";
Group = "fc";
StateDirectory = "fc";
WorkingDirectory = "/var/lib/fc";
ReadWritePaths = ["/var/lib/fc"];
# Hardening
ProtectSystem = "strict";
ProtectHome = true;
NoNewPrivileges = true;
PrivateTmp = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
};
environment = {
FC_CONFIG_FILE = "${settingsFile}";
# Checkout/eval scratch space inside the state directory.
FC_EVALUATOR__WORK_DIR = "/var/lib/fc/evaluator";
# NOTE(review): this env var hard-forces restricted evaluation,
# overriding settings.evaluator.restrict_eval (which defaults to true
# above) — confirm the unconditional override is intended.
FC_EVALUATOR__RESTRICT_EVAL = "true";
};
};
# Queue runner unit: dispatches builds from the queue.
systemd.services.fc-queue-runner = lib.mkIf cfg.queueRunner.enable {
description = "FC CI Queue Runner";
wantedBy = ["multi-user.target"];
after = ["network.target" "fc-server.service"] ++ lib.optional cfg.database.createLocally "postgresql.target";
# Server must be up first (migrations, API) before builds are dispatched.
requires = ["fc-server.service"] ++ lib.optional cfg.database.createLocally "postgresql.target";
path = with pkgs; [
nix
];
serviceConfig = {
ExecStart = "${cfg.queueRunnerPackage}/bin/fc-queue-runner";
Restart = "on-failure";
RestartSec = 10;
User = "fc";
Group = "fc";
StateDirectory = "fc";
LogsDirectory = "fc";
WorkingDirectory = "/var/lib/fc";
# gcroots path must be writable so the runner can register GC roots
# for build outputs (matches gc.gc_roots_dir in the default settings).
ReadWritePaths = [
"/var/lib/fc"
"/nix/var/nix/gcroots/per-user/fc"
];
# Hardening
ProtectSystem = "strict";
ProtectHome = true;
NoNewPrivileges = true;
PrivateTmp = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
};
environment = {
FC_CONFIG_FILE = "${settingsFile}";
# Build scratch space inside the state directory.
FC_QUEUE_RUNNER__WORK_DIR = "/var/lib/fc/queue-runner";
};
};
};
}

View file

@ -0,0 +1,11 @@
# Build the `fc-common` workspace crate with crane, reusing the shared
# dependency-only artifacts so only the crate itself is recompiled.
{
craneLib,
commonArgs,
cargoArtifacts,
}:
craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "fc-common";
cargoExtraArgs = "--package fc-common";
})

View file

@ -0,0 +1,11 @@
# Build the `fc-evaluator` workspace crate with crane, reusing the shared
# dependency-only artifacts so only the crate itself is recompiled.
{
craneLib,
commonArgs,
cargoArtifacts,
}:
craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "fc-evaluator";
cargoExtraArgs = "--package fc-evaluator";
})

View file

@ -0,0 +1,11 @@
# Build the `fc-migrate-cli` workspace crate with crane, reusing the shared
# dependency-only artifacts so only the crate itself is recompiled.
{
craneLib,
commonArgs,
cargoArtifacts,
}:
craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "fc-migrate-cli";
cargoExtraArgs = "--package fc-migrate-cli";
})

View file

@ -0,0 +1,11 @@
# Build the `fc-queue-runner` workspace crate with crane, reusing the shared
# dependency-only artifacts so only the crate itself is recompiled.
{
craneLib,
commonArgs,
cargoArtifacts,
}:
craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "fc-queue-runner";
cargoExtraArgs = "--package fc-queue-runner";
})

View file

@ -0,0 +1,11 @@
# Build the `fc-server` workspace crate with crane, reusing the shared
# dependency-only artifacts so only the crate itself is recompiled.
{
craneLib,
commonArgs,
cargoArtifacts,
}:
craneLib.buildPackage (commonArgs
// {
inherit cargoArtifacts;
pname = "fc-server";
cargoExtraArgs = "--package fc-server";
})

2216
nix/vm-test.nix Normal file

File diff suppressed because it is too large Load diff