Compare commits

...

10 commits

Author SHA1 Message Date
f812ca50b3
chore: bump dependencies; add clippy config
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I7e4d1b531e6d9f1fa824707a95fb3f2e6a6a6964
2026-02-28 12:18:22 +03:00
0ca92f2710
treewide: address all clippy lints
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I5cf55cc4cb558c3f9f764c71224e87176a6a6964
2026-02-28 12:18:21 +03:00
967d51e867
docs: finalize REST API status in comparison doc
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I438e2416b03104a71c935752c34f81ad6a6a6964
2026-02-28 12:18:20 +03:00
6f5ad09748
chore: format formatter files with toml formatter
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I857fd75519bfca8221670598d0ff9d9e6a6a6964
2026-02-28 12:18:19 +03:00
21446c6dcb
fc-queue-runner: implement persistent notification retry queue with exponential backoff
Adds a `notification_tasks` table and a background worker to (hopefully
reliably) deliver webhooks, git status updates, and e-mail notifications
with automatic retry on transient failures.

This was one of the critical gaps, finally done.

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I794967c66958658c4d8aed40793d67f96a6a6964
2026-02-28 12:18:18 +03:00
3807293eb7
chore: update security updates in example config
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I2b2eee2f6f8b2e939d0c080fb37f0bb76a6a6964
2026-02-28 12:18:17 +03:00
d0ffa5d9e5
fc-server: implement proper rate limiting with token bucket algorithm; fix rate_limit_rps
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I68237ff6216337eba1afa8e8606d545b6a6a6964
2026-02-28 12:18:16 +03:00
754f5afb6d
flake: bump inputs
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I0b44f21e9fb8a087904a64448c358cdd6a6a6964
2026-02-28 12:18:15 +03:00
d541b7ebbf
nix: simplify tests
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I02126573ee9573fd7a1e5a12e42dd02d6a6a6964
2026-02-28 12:18:14 +03:00
e7425e0abf
fc-common: consolidate database migrations; simplify
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ia808d76241cec6e8760d87443bb0dc976a6a6964
2026-02-28 12:18:13 +03:00
94 changed files with 3634 additions and 1956 deletions

View file

@ -24,4 +24,3 @@ unstable_features = true
use_field_init_shorthand = true use_field_init_shorthand = true
use_try_shorthand = true use_try_shorthand = true
wrap_comments = true wrap_comments = true

View file

@ -11,5 +11,3 @@ keys = [ "package" ]
[rule.formatting] [rule.formatting]
reorder_keys = false reorder_keys = false

114
Cargo.lock generated
View file

@ -90,9 +90,9 @@ dependencies = [
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.101" version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]] [[package]]
name = "ar_archive_writer" name = "ar_archive_writer"
@ -229,9 +229,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]] [[package]]
name = "aws-lc-rs" name = "aws-lc-rs"
version = "1.15.4" version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9"
dependencies = [ dependencies = [
"aws-lc-sys", "aws-lc-sys",
"zeroize", "zeroize",
@ -373,9 +373,9 @@ dependencies = [
[[package]] [[package]]
name = "bumpalo" name = "bumpalo"
version = "3.19.1" version = "3.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb"
[[package]] [[package]]
name = "byteorder" name = "byteorder"
@ -421,9 +421,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]] [[package]]
name = "chrono" name = "chrono"
version = "0.4.43" version = "0.4.44"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0"
dependencies = [ dependencies = [
"iana-time-zone", "iana-time-zone",
"js-sys", "js-sys",
@ -445,9 +445,9 @@ dependencies = [
[[package]] [[package]]
name = "clap" name = "clap"
version = "4.5.59" version = "4.5.60"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5caf74d17c3aec5495110c34cc3f78644bfa89af6c8993ed4de2790e49b6499" checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a"
dependencies = [ dependencies = [
"clap_builder", "clap_builder",
"clap_derive", "clap_derive",
@ -455,9 +455,9 @@ dependencies = [
[[package]] [[package]]
name = "clap_builder" name = "clap_builder"
version = "4.5.59" version = "4.5.60"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "370daa45065b80218950227371916a1633217ae42b2715b2287b606dcd618e24" checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876"
dependencies = [ dependencies = [
"anstream", "anstream",
"anstyle", "anstyle",
@ -823,7 +823,7 @@ dependencies = [
"tempfile", "tempfile",
"thiserror 2.0.18", "thiserror 2.0.18",
"tokio", "tokio",
"toml 1.0.2+spec-1.1.0", "toml 1.0.3+spec-1.1.0",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"urlencoding", "urlencoding",
@ -849,7 +849,7 @@ dependencies = [
"tempfile", "tempfile",
"thiserror 2.0.18", "thiserror 2.0.18",
"tokio", "tokio",
"toml 1.0.2+spec-1.1.0", "toml 1.0.3+spec-1.1.0",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"uuid", "uuid",
@ -1556,9 +1556,9 @@ dependencies = [
[[package]] [[package]]
name = "js-sys" name = "js-sys"
version = "0.3.85" version = "0.3.91"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
dependencies = [ dependencies = [
"once_cell", "once_cell",
"wasm-bindgen", "wasm-bindgen",
@ -1652,7 +1652,7 @@ checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"libc", "libc",
"redox_syscall 0.7.1", "redox_syscall 0.7.3",
] ]
[[package]] [[package]]
@ -1681,9 +1681,9 @@ dependencies = [
[[package]] [[package]]
name = "libz-sys" name = "libz-sys"
version = "1.1.23" version = "1.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839"
dependencies = [ dependencies = [
"cc", "cc",
"libc", "libc",
@ -1693,9 +1693,9 @@ dependencies = [
[[package]] [[package]]
name = "linux-raw-sys" name = "linux-raw-sys"
version = "0.11.0" version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
[[package]] [[package]]
name = "litemap" name = "litemap"
@ -2038,9 +2038,9 @@ dependencies = [
[[package]] [[package]]
name = "pin-project-lite" name = "pin-project-lite"
version = "0.2.16" version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd"
[[package]] [[package]]
name = "pin-utils" name = "pin-utils"
@ -2269,9 +2269,9 @@ dependencies = [
[[package]] [[package]]
name = "redox_syscall" name = "redox_syscall"
version = "0.7.1" version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16"
dependencies = [ dependencies = [
"bitflags", "bitflags",
] ]
@ -2301,9 +2301,9 @@ dependencies = [
[[package]] [[package]]
name = "regex-syntax" name = "regex-syntax"
version = "0.8.9" version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a"
[[package]] [[package]]
name = "reqwest" name = "reqwest"
@ -2446,9 +2446,9 @@ checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]] [[package]]
name = "rustix" name = "rustix"
version = "1.1.3" version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"errno", "errno",
@ -2459,9 +2459,9 @@ dependencies = [
[[package]] [[package]]
name = "rustls" name = "rustls"
version = "0.23.36" version = "0.23.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
dependencies = [ dependencies = [
"aws-lc-rs", "aws-lc-rs",
"log", "log",
@ -2572,9 +2572,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]] [[package]]
name = "security-framework" name = "security-framework"
version = "3.6.0" version = "3.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"core-foundation", "core-foundation",
@ -2585,9 +2585,9 @@ dependencies = [
[[package]] [[package]]
name = "security-framework-sys" name = "security-framework-sys"
version = "2.16.0" version = "2.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a" checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3"
dependencies = [ dependencies = [
"core-foundation-sys", "core-foundation-sys",
"libc", "libc",
@ -3029,9 +3029,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]] [[package]]
name = "syn" name = "syn"
version = "2.0.116" version = "2.0.117"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb" checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -3071,9 +3071,9 @@ dependencies = [
[[package]] [[package]]
name = "tempfile" name = "tempfile"
version = "3.25.0" version = "3.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0"
dependencies = [ dependencies = [
"fastrand", "fastrand",
"getrandom 0.4.1", "getrandom 0.4.1",
@ -3242,9 +3242,9 @@ dependencies = [
[[package]] [[package]]
name = "toml" name = "toml"
version = "1.0.2+spec-1.1.0" version = "1.0.3+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1dfefef6a142e93f346b64c160934eb13b5594b84ab378133ac6815cb2bd57f" checksum = "c7614eaf19ad818347db24addfa201729cf2a9b6fdfd9eb0ab870fcacc606c0c"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"serde_core", "serde_core",
@ -3606,9 +3606,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b"
[[package]] [[package]]
name = "wasm-bindgen" name = "wasm-bindgen"
version = "0.2.108" version = "0.2.114"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"once_cell", "once_cell",
@ -3619,9 +3619,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-futures" name = "wasm-bindgen-futures"
version = "0.4.58" version = "0.4.64"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"futures-util", "futures-util",
@ -3633,9 +3633,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-macro" name = "wasm-bindgen-macro"
version = "0.2.108" version = "0.2.114"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
dependencies = [ dependencies = [
"quote", "quote",
"wasm-bindgen-macro-support", "wasm-bindgen-macro-support",
@ -3643,9 +3643,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-macro-support" name = "wasm-bindgen-macro-support"
version = "0.2.108" version = "0.2.114"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
dependencies = [ dependencies = [
"bumpalo", "bumpalo",
"proc-macro2", "proc-macro2",
@ -3656,9 +3656,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-shared" name = "wasm-bindgen-shared"
version = "0.2.108" version = "0.2.114"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
dependencies = [ dependencies = [
"unicode-ident", "unicode-ident",
] ]
@ -3699,9 +3699,9 @@ dependencies = [
[[package]] [[package]]
name = "web-sys" name = "web-sys"
version = "0.3.85" version = "0.3.91"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
dependencies = [ dependencies = [
"js-sys", "js-sys",
"wasm-bindgen", "wasm-bindgen",
@ -4277,18 +4277,18 @@ dependencies = [
[[package]] [[package]]
name = "zerocopy" name = "zerocopy"
version = "0.8.39" version = "0.8.40"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5"
dependencies = [ dependencies = [
"zerocopy-derive", "zerocopy-derive",
] ]
[[package]] [[package]]
name = "zerocopy-derive" name = "zerocopy-derive"
version = "0.8.39" version = "0.8.40"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",

View file

@ -67,6 +67,76 @@ urlencoding = "2.1.3"
uuid = { version = "1.18.1", features = [ "v4", "serde" ] } uuid = { version = "1.18.1", features = [ "v4", "serde" ] }
xz2 = "0.1.7" xz2 = "0.1.7"
# Clippy lint groups and individual lints are documented at:
# <https://rust-lang.github.io/rust-clippy/master/index.html>
[workspace.lints.clippy]
cargo = { level = "warn", priority = -1 }
complexity = { level = "warn", priority = -1 }
nursery = { level = "warn", priority = -1 }
pedantic = { level = "warn", priority = -1 }
perf = { level = "warn", priority = -1 }
style = { level = "warn", priority = -1 }
# The lint groups above enable some less-than-desirable rules; we manually
# override those individual lints below to keep our sanity.
absolute_paths = "allow"
arbitrary_source_item_ordering = "allow"
clone_on_ref_ptr = "warn"
dbg_macro = "warn"
empty_drop = "warn"
empty_structs_with_brackets = "warn"
exit = "warn"
filetype_is_file = "warn"
get_unwrap = "warn"
implicit_return = "allow"
infinite_loop = "warn"
map_with_unused_argument_over_ranges = "warn"
missing_docs_in_private_items = "allow"
multiple_crate_versions = "allow" # :(
non_ascii_literal = "allow"
non_std_lazy_statics = "warn"
pathbuf_init_then_push = "warn"
pattern_type_mismatch = "allow"
question_mark_used = "allow"
rc_buffer = "warn"
rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
similar_names = "allow"
single_call_fn = "allow"
std_instead_of_core = "allow"
too_long_first_doc_paragraph = "allow"
too_many_lines = "allow"
cast_possible_truncation = "allow"
cast_possible_wrap = "allow"
cast_precision_loss = "allow"
cast_sign_loss = "allow"
undocumented_unsafe_blocks = "warn"
unnecessary_safety_comment = "warn"
unused_result_ok = "warn"
unused_trait_names = "allow"
# False positive:
# clippy's build script check doesn't recognize workspace-inherited metadata
# which means in our current workspace layout, we get pranked by Clippy.
cargo_common_metadata = "allow"
# In honor of a recent Cloudflare regression
panic = "deny"
unwrap_used = "deny"
# Less dangerous, but we'd like to know
# Those must be opt-in, and are fine ONLY in tests and examples.
expect_used = "warn"
print_stderr = "warn"
print_stdout = "warn"
todo = "warn"
unimplemented = "warn"
unreachable = "warn"
[profile.dev]
debug = true
opt-level = 0
[profile.release] [profile.release]
lto = true lto = true
opt-level = "z" opt-level = "z"

View file

@ -0,0 +1,649 @@
-- FC database schema.
-- Full schema definition for the FC CI system.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- projects: stores repository configurations
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
name VARCHAR(255) NOT NULL UNIQUE,
description TEXT,
-- Clone URL of the repository this project builds from.
repository_url TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- NOTE(review): updated_at only defaults to NOW(); nothing visible here
-- refreshes it on UPDATE -- presumably the update_updated_at_column trigger
-- function defined at the end of this schema is attached. Confirm.
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- users: accounts for authentication and personalization
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
username VARCHAR(255) NOT NULL UNIQUE,
email VARCHAR(255) NOT NULL UNIQUE,
full_name VARCHAR(255),
-- Nullable: presumably non-'local' (externally authenticated) accounts
-- carry no local password -- confirm against the auth code.
password_hash VARCHAR(255),
-- Account origin; only 'local' is visible here, other values unconstrained.
user_type VARCHAR(50) NOT NULL DEFAULT 'local',
-- Free-form role string; unlike api_keys.role there is no CHECK list here.
role VARCHAR(50) NOT NULL DEFAULT 'read-only',
enabled BOOLEAN NOT NULL DEFAULT true,
email_verified BOOLEAN NOT NULL DEFAULT false,
-- Whether this user's dashboard is visible without authentication --
-- presumably; confirm against fc-server.
public_dashboard BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_login_at TIMESTAMP WITH TIME ZONE
);
-- remote_builders: multi-machine / multi-arch build agents
CREATE TABLE remote_builders (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
name VARCHAR(255) NOT NULL UNIQUE,
ssh_uri TEXT NOT NULL,
-- Nix system types this builder can build for; empty by default.
systems TEXT[] NOT NULL DEFAULT '{}',
max_jobs INTEGER NOT NULL DEFAULT 1,
-- Relative scheduling weight; higher presumably means preferred --
-- confirm against the queue runner's dispatch logic.
speed_factor INTEGER NOT NULL DEFAULT 1,
supported_features TEXT[] NOT NULL DEFAULT '{}',
mandatory_features TEXT[] NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
public_host_key TEXT,
ssh_key_file TEXT,
-- Health tracking: failure streak plus a cooldown window during which the
-- builder is skipped.
consecutive_failures INTEGER NOT NULL DEFAULT 0,
disabled_until TIMESTAMP WITH TIME ZONE,
last_failure TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- jobsets: build configurations for each project
CREATE TABLE jobsets (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
nix_expression TEXT NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT true,
flake_mode BOOLEAN NOT NULL DEFAULT true,
-- Polling interval; the unit (seconds vs. minutes) is not visible in this
-- schema -- confirm against the scheduler before relying on it.
check_interval INTEGER NOT NULL DEFAULT 60,
branch VARCHAR(255),
-- Fair-share scheduling weight across jobsets.
scheduling_shares INTEGER NOT NULL DEFAULT 100,
state VARCHAR(50) NOT NULL DEFAULT 'enabled' CHECK (
state IN (
'disabled',
'enabled',
'one_shot',
'one_at_a_time'
)
),
last_checked_at TIMESTAMP WITH TIME ZONE,
-- Retention count for old evaluations/builds -- presumably used by a
-- cleanup/GC job; confirm.
keep_nr INTEGER NOT NULL DEFAULT 3,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Jobset names are unique within a project, not globally.
UNIQUE (project_id, name)
);
-- api_keys: authentication tokens with role-based access control
CREATE TABLE api_keys (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
name VARCHAR(255) NOT NULL,
-- Only a hash of the key is stored here; presumably the raw token is never
-- persisted. Looked up via idx_api_keys_key_hash.
key_hash VARCHAR(128) NOT NULL UNIQUE,
role VARCHAR(50) NOT NULL DEFAULT 'read-only' CHECK (
role IN (
'admin',
'create-projects',
'restart-jobs',
'cancel-build',
'bump-to-front',
'eval-jobset',
'read-only'
)
),
-- Optional owner; the key survives user deletion (SET NULL) rather than
-- being revoked with the account.
user_id UUID REFERENCES users (id) ON DELETE SET NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- evaluations: Nix evaluation results for each jobset commit
CREATE TABLE evaluations (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
jobset_id UUID NOT NULL REFERENCES jobsets (id) ON DELETE CASCADE,
-- NOTE(review): 40 chars fits a SHA-1 commit hash only; a SHA-256 git
-- repository would need 64 -- confirm whether that matters here.
commit_hash VARCHAR(40) NOT NULL,
evaluation_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
status TEXT NOT NULL CHECK (
status IN ('pending', 'running', 'completed', 'failed')
),
error_message TEXT,
-- Hash over the evaluation inputs (see idx_evaluations_inputs_hash) --
-- presumably used to skip re-evaluation of unchanged inputs; confirm.
inputs_hash VARCHAR(128),
-- pr_*: populated only for evaluations triggered by a pull request.
pr_number INTEGER,
pr_head_branch TEXT,
pr_base_branch TEXT,
pr_action TEXT,
-- At most one evaluation per commit per jobset.
UNIQUE (jobset_id, commit_hash)
);
-- builds: individual build jobs
CREATE TABLE builds (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
evaluation_id UUID NOT NULL REFERENCES evaluations (id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
-- Nix derivation path; several partial indexes below key on this.
drv_path TEXT NOT NULL,
status TEXT NOT NULL CHECK (
status IN (
'pending',
'running',
'succeeded',
'failed',
'dependency_failed',
'aborted',
'cancelled',
'failed_with_output',
'timeout',
'cached_failure',
'unsupported_system',
'log_limit_exceeded',
'nar_size_limit_exceeded',
'non_deterministic'
)
),
started_at TIMESTAMP WITH TIME ZONE,
completed_at TIMESTAMP WITH TIME ZONE,
log_path TEXT,
build_output_path TEXT,
error_message TEXT,
-- Queue ordering: higher priority first, then oldest first
-- (see idx_builds_priority / idx_builds_pending_priority).
priority INTEGER NOT NULL DEFAULT 0,
retry_count INTEGER NOT NULL DEFAULT 0,
max_retries INTEGER NOT NULL DEFAULT 3,
-- Set while outgoing notifications for this build are still owed --
-- presumably cleared by the notification retry worker; confirm against
-- fc-queue-runner.
notification_pending_since TIMESTAMP WITH TIME ZONE,
log_url TEXT,
outputs JSONB,
-- Aggregate builds group constituent builds; the JSONB shape of
-- `constituents` is not visible here -- confirm against the queue runner.
is_aggregate BOOLEAN NOT NULL DEFAULT false,
constituents JSONB,
builder_id UUID REFERENCES remote_builders (id),
signed BOOLEAN NOT NULL DEFAULT false,
system VARCHAR(50),
-- keep: pins this build against retention cleanup -- presumably; confirm.
keep BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- One build per job name per evaluation.
UNIQUE (evaluation_id, job_name)
);
-- build_products: output artifacts and metadata
CREATE TABLE build_products (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
-- Store path of the artifact; prefix-searchable via the text_pattern_ops
-- index idx_build_products_path_prefix.
path TEXT NOT NULL,
sha256_hash VARCHAR(64),
file_size BIGINT,
content_type VARCHAR(100),
is_directory BOOLEAN NOT NULL DEFAULT false,
-- Path of the Nix GC root keeping this product alive, if one was created.
gc_root_path TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- build_steps: detailed build execution logs and timing
CREATE TABLE build_steps (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
step_number INTEGER NOT NULL,
command TEXT NOT NULL,
output TEXT,
error_output TEXT,
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- NULL while the step is still running.
completed_at TIMESTAMP WITH TIME ZONE,
-- NULL until the step finishes.
exit_code INTEGER,
-- Steps are ordered and unique within a build.
UNIQUE (build_id, step_number)
);
-- build_dependencies: tracks inter-build dependency relationships
CREATE TABLE build_dependencies (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
-- Edge direction: build_id depends on dependency_build_id.
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
dependency_build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
UNIQUE (build_id, dependency_build_id)
);
-- webhook_configs: incoming push event configuration per project
CREATE TABLE webhook_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
forge_type VARCHAR(50) NOT NULL CHECK (
forge_type IN ('github', 'gitea', 'forgejo', 'gitlab')
),
-- NOTE(review): column name suggests only a hash of the webhook secret is
-- stored, but a one-way hash cannot be used to verify HMAC-signed payloads
-- (e.g. GitHub's X-Hub-Signature) -- confirm how verification works.
secret_hash VARCHAR(128),
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- One webhook config per forge type per project.
UNIQUE (project_id, forge_type)
);
-- notification_configs: outgoing notification configuration per project
CREATE TABLE notification_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
notification_type VARCHAR(50) NOT NULL CHECK (
notification_type IN (
'github_status',
'gitea_status',
'forgejo_status',
'gitlab_status',
'webhook',
'email'
)
),
-- Per-type settings; the JSONB schema is defined by the application, not
-- by the database -- see the notification worker for the expected keys.
config JSONB NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- One config per notification type per project.
UNIQUE (project_id, notification_type)
);
-- jobset_inputs: parameterized inputs for jobsets
CREATE TABLE jobset_inputs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
jobset_id UUID NOT NULL REFERENCES jobsets (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
input_type VARCHAR(50) NOT NULL CHECK (
input_type IN ('git', 'string', 'boolean', 'path', 'build')
),
-- Interpretation of `value` depends on input_type (URL, literal, path, ...).
value TEXT NOT NULL,
-- Pinned revision; presumably only meaningful for 'git' inputs -- confirm.
revision TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (jobset_id, name)
);
-- channels: release management, tracks the latest good evaluation per jobset
CREATE TABLE channels (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
jobset_id UUID NOT NULL REFERENCES jobsets (id) ON DELETE CASCADE,
-- No ON DELETE action here (default NO ACTION), so deleting an evaluation
-- a channel points at will fail until the channel is advanced.
current_evaluation_id UUID REFERENCES evaluations (id),
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE (project_id, name)
);
-- starred_jobs: personalized dashboard bookmarks per user
CREATE TABLE starred_jobs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE,
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
jobset_id UUID REFERENCES jobsets (id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- NOTE(review): jobset_id is nullable and PostgreSQL treats NULLs as
-- distinct in UNIQUE constraints, so duplicate stars with a NULL jobset_id
-- are not rejected (UNIQUE NULLS NOT DISTINCT would close this on PG 15+).
UNIQUE (user_id, project_id, jobset_id, job_name)
);
-- user_sessions: persistent authentication tokens
CREATE TABLE user_sessions (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE,
-- Hash of the session token, looked up via idx_user_sessions_token.
-- NOTE(review): no UNIQUE constraint, so duplicate hashes are not rejected
-- by the database -- confirm whether that is intended.
session_token_hash VARCHAR(255) NOT NULL,
-- Expired rows are presumably reaped by a cleanup job using
-- idx_user_sessions_expires -- confirm.
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- project_members: per-project permission assignments
CREATE TABLE project_members (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
project_id UUID NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE,
-- Free-form role; unlike api_keys.role there is no CHECK list, so the set
-- of valid member roles is presumably enforced in the application.
role VARCHAR(50) NOT NULL DEFAULT 'member',
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- A user appears at most once per project.
UNIQUE (project_id, user_id)
);
-- build_metrics: timing, size, and performance metrics per build
CREATE TABLE build_metrics (
-- Consistency fix: every other table in this schema defaults its id to
-- uuid_generate_v4() from the uuid-ossp extension created at the top of
-- the file; the previous gen_random_uuid() default was the lone outlier
-- and additionally required pgcrypto on PostgreSQL < 13. Both produce
-- random version-4 UUIDs, so existing callers are unaffected.
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
build_id UUID NOT NULL REFERENCES builds (id) ON DELETE CASCADE,
metric_name VARCHAR(100) NOT NULL,
metric_value DOUBLE PRECISION NOT NULL,
-- Unit label for metric_value (e.g. seconds, bytes) -- presumably;
-- confirm against the metrics collector.
unit VARCHAR(50) NOT NULL,
collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- One row per metric name per build; re-collection must upsert.
UNIQUE (build_id, metric_name)
);
-- failed_paths_cache: prevents rebuilding known-failing derivations
CREATE TABLE failed_paths_cache (
-- Keyed directly on the derivation path; one cache entry per drv.
drv_path TEXT PRIMARY KEY,
-- Intentionally (presumably) not a foreign key, so the cache entry can
-- outlive the build that produced it -- confirm before adding a constraint.
source_build_id UUID,
failure_status TEXT,
failed_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Indexes. Partial indexes (WHERE ...) are used for hot queue-runner
-- queries so the index only covers the rows those queries touch.
-- Indexes: projects
CREATE INDEX idx_projects_name ON projects (name);
CREATE INDEX idx_projects_created_at ON projects (created_at);
-- Indexes: users
CREATE INDEX idx_users_username ON users (username);
CREATE INDEX idx_users_email ON users (email);
CREATE INDEX idx_users_role ON users (role);
CREATE INDEX idx_users_enabled ON users (enabled);
-- Indexes: remote_builders
-- NOTE(review): a partial index on (enabled) WHERE enabled = true indexes a
-- single constant value; it only helps queries that filter exactly on
-- enabled = true -- confirm this matches the dispatcher's query shape.
CREATE INDEX idx_remote_builders_enabled ON remote_builders (enabled)
WHERE
enabled = true;
-- Indexes: jobsets
CREATE INDEX idx_jobsets_project_id ON jobsets (project_id);
CREATE INDEX idx_jobsets_enabled ON jobsets (enabled);
CREATE INDEX idx_jobsets_name ON jobsets (name);
CREATE INDEX idx_jobsets_state ON jobsets (state);
CREATE INDEX idx_jobsets_last_checked_at ON jobsets (last_checked_at);
-- Indexes: api_keys
CREATE INDEX idx_api_keys_key_hash ON api_keys (key_hash);
CREATE INDEX idx_api_keys_user_id ON api_keys (user_id);
-- Indexes: evaluations
CREATE INDEX idx_evaluations_jobset_id ON evaluations (jobset_id);
CREATE INDEX idx_evaluations_commit_hash ON evaluations (commit_hash);
CREATE INDEX idx_evaluations_status ON evaluations (status);
CREATE INDEX idx_evaluations_evaluation_time ON evaluations (evaluation_time);
CREATE INDEX idx_evaluations_inputs_hash ON evaluations (jobset_id, inputs_hash);
CREATE INDEX idx_evaluations_pr ON evaluations (jobset_id, pr_number)
WHERE
pr_number IS NOT NULL;
-- Indexes: builds
CREATE INDEX idx_builds_evaluation_id ON builds (evaluation_id);
CREATE INDEX idx_builds_status ON builds (status);
CREATE INDEX idx_builds_job_name ON builds (job_name);
CREATE INDEX idx_builds_started_at ON builds (started_at);
CREATE INDEX idx_builds_completed_at ON builds (completed_at);
-- Matches the queue ordering: highest priority first, then oldest first.
CREATE INDEX idx_builds_priority ON builds (priority DESC, created_at ASC);
CREATE INDEX idx_builds_notification_pending ON builds (notification_pending_since)
WHERE
notification_pending_since IS NOT NULL;
CREATE INDEX idx_builds_drv_path ON builds (drv_path);
CREATE INDEX idx_builds_builder ON builds (builder_id)
WHERE
builder_id IS NOT NULL;
CREATE INDEX idx_builds_system ON builds (system)
WHERE
system IS NOT NULL;
-- Dequeue path: pending builds in priority order.
CREATE INDEX idx_builds_pending_priority ON builds (status, priority DESC, created_at ASC)
WHERE
status = 'pending';
-- Cached-success lookup by derivation path.
CREATE INDEX idx_builds_drv_completed ON builds (drv_path)
WHERE
status = 'succeeded';
-- Indexes: build_products
CREATE INDEX idx_build_products_build_id ON build_products (build_id);
CREATE INDEX idx_build_products_name ON build_products (name);
-- text_pattern_ops enables prefix (LIKE 'x%') scans regardless of collation.
CREATE INDEX idx_build_products_path_prefix ON build_products (path text_pattern_ops);
-- Indexes: build_steps
CREATE INDEX idx_build_steps_build_id ON build_steps (build_id);
CREATE INDEX idx_build_steps_started_at ON build_steps (started_at);
-- Indexes: build_dependencies
CREATE INDEX idx_build_deps_build ON build_dependencies (build_id);
CREATE INDEX idx_build_deps_dep ON build_dependencies (dependency_build_id);
-- Indexes: webhook/notification/jobset_inputs/channels
CREATE INDEX idx_webhook_configs_project ON webhook_configs (project_id);
CREATE INDEX idx_notification_configs_project ON notification_configs (project_id);
CREATE INDEX idx_jobset_inputs_jobset ON jobset_inputs (jobset_id);
CREATE INDEX idx_channels_project ON channels (project_id);
CREATE INDEX idx_channels_jobset ON channels (jobset_id);
-- Indexes: users/sessions/members
CREATE INDEX idx_starred_jobs_user_id ON starred_jobs (user_id);
CREATE INDEX idx_starred_jobs_project_id ON starred_jobs (project_id);
CREATE INDEX idx_user_sessions_token ON user_sessions (session_token_hash);
CREATE INDEX idx_user_sessions_user_id ON user_sessions (user_id);
CREATE INDEX idx_user_sessions_expires ON user_sessions (expires_at);
CREATE INDEX idx_project_members_project_id ON project_members (project_id);
CREATE INDEX idx_project_members_user_id ON project_members (user_id);
-- Indexes: build_metrics / failed_paths_cache
CREATE INDEX idx_build_metrics_build_id ON build_metrics (build_id);
CREATE INDEX idx_build_metrics_collected_at ON build_metrics (collected_at);
CREATE INDEX idx_build_metrics_name ON build_metrics (metric_name);
CREATE INDEX idx_failed_paths_cache_failed_at ON failed_paths_cache (failed_at);
-- Trigger function: auto-update updated_at on mutation.
-- Attached as a BEFORE UPDATE row trigger so the timestamp is stamped
-- on the row being written, without an extra UPDATE round-trip.
CREATE OR REPLACE FUNCTION update_updated_at_column () RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Keep projects.updated_at current on every row update.
CREATE TRIGGER update_projects_updated_at BEFORE
UPDATE ON projects FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column ();
-- Keep jobsets.updated_at current on every row update.
CREATE TRIGGER update_jobsets_updated_at BEFORE
UPDATE ON jobsets FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column ();
-- Keep users.updated_at current on every row update.
CREATE TRIGGER update_users_updated_at BEFORE
UPDATE ON users FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column ();
-- Trigger functions: LISTEN/NOTIFY for event-driven daemon wakeup.
-- Daemons LISTEN on the 'fc_builds_changed' / 'fc_jobsets_changed' channels
-- and wake immediately instead of polling. The payload carries only the
-- operation and table name; listeners re-query for actual row data.
-- Returning NULL is fine here: these are AFTER triggers, so the return
-- value is ignored.
CREATE OR REPLACE FUNCTION notify_builds_changed () RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_builds_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION notify_jobsets_changed () RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_jobsets_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Builds: new build inserted (queue-runner should wake).
CREATE TRIGGER trg_builds_insert_notify
AFTER INSERT ON builds FOR EACH ROW
EXECUTE FUNCTION notify_builds_changed ();
-- Builds: status changed; WHEN guard avoids notifying on unrelated updates.
CREATE TRIGGER trg_builds_status_notify
AFTER
UPDATE ON builds FOR EACH ROW WHEN (OLD.status IS DISTINCT FROM NEW.status)
EXECUTE FUNCTION notify_builds_changed ();
-- Jobsets: new jobset created (evaluator should wake).
CREATE TRIGGER trg_jobsets_insert_notify
AFTER INSERT ON jobsets FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed ();
-- Jobsets: notify only when evaluation-relevant fields change.
CREATE TRIGGER trg_jobsets_update_notify
AFTER
UPDATE ON jobsets FOR EACH ROW WHEN (
OLD.enabled IS DISTINCT FROM NEW.enabled
OR OLD.state IS DISTINCT FROM NEW.state
OR OLD.nix_expression IS DISTINCT FROM NEW.nix_expression
OR OLD.check_interval IS DISTINCT FROM NEW.check_interval
)
EXECUTE FUNCTION notify_jobsets_changed ();
-- Jobsets: deleted (evaluator should wake to stop tracking it).
CREATE TRIGGER trg_jobsets_delete_notify
AFTER DELETE ON jobsets FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed ();
-- notification_tasks: persistent notification retry queue
-- Stores notification delivery tasks with automatic retry and exponential backoff
CREATE TABLE notification_tasks (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4 (),
-- Delivery channel for this task; dispatched by the queue-runner worker.
notification_type VARCHAR(50) NOT NULL CHECK (
notification_type IN (
'webhook',
'github_status',
'gitea_status',
'gitlab_status',
'email'
)
),
-- Channel-specific delivery data (opaque to the schema).
payload JSONB NOT NULL,
-- NOTE(review): this vocabulary uses 'completed' while builds use
-- 'succeeded' -- presumably intentional for a task queue, but confirm.
status VARCHAR(20) NOT NULL DEFAULT 'pending' CHECK (
status IN ('pending', 'running', 'completed', 'failed')
),
-- Retry bookkeeping: the worker gives up once attempts reaches max_attempts.
attempts INTEGER NOT NULL DEFAULT 0,
max_attempts INTEGER NOT NULL DEFAULT 5,
-- Earliest time the worker may pick this task up again (backoff schedule).
next_retry_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-- Most recent delivery error, kept for operator diagnosis.
last_error TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE
);
-- Indexes: notification_tasks
-- Partial composite index: the worker polls only unfinished tasks,
-- ordered by (status, next_retry_at); finished tasks stay out of the index.
CREATE INDEX idx_notification_tasks_status_next_retry ON notification_tasks (
status,
next_retry_at
)
WHERE
status IN ('pending', 'running');
CREATE INDEX idx_notification_tasks_created_at ON notification_tasks (created_at);
-- Views
-- active_jobsets: jobsets the evaluator should act on, joined with their
-- project's name and repository URL. Columns are enumerated explicitly
-- (not j.*) so later ALTER TABLE on jobsets cannot silently change the
-- view's column set.
CREATE VIEW active_jobsets AS
SELECT
j.id,
j.project_id,
j.name,
j.nix_expression,
j.enabled,
j.flake_mode,
j.check_interval,
j.branch,
j.scheduling_shares,
j.created_at,
j.updated_at,
j.state,
j.last_checked_at,
j.keep_nr,
p.name as project_name,
p.repository_url
FROM
jobsets j
JOIN projects p ON j.project_id = p.id
WHERE
-- Every state except 'disabled' is considered active.
j.state IN ('enabled', 'one_shot', 'one_at_a_time');
-- build_stats: aggregate counters over builds that have started.
-- COUNT(CASE ...) counts only matching rows (CASE yields NULL otherwise).
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
COUNT(
CASE
WHEN status = 'succeeded' THEN 1
END
) as completed_builds,
COUNT(
CASE
WHEN status = 'failed' THEN 1
END
) as failed_builds,
COUNT(
CASE
WHEN status = 'running' THEN 1
END
) as running_builds,
COUNT(
CASE
WHEN status = 'pending' THEN 1
END
) as pending_builds,
-- Mean wall-clock duration in seconds; rows with NULL completed_at are
-- ignored by AVG, so in-flight builds do not skew the figure.
AVG(
EXTRACT(
EPOCH
FROM
(completed_at - started_at)
)
)::double precision as avg_duration_seconds
FROM
builds
WHERE
started_at IS NOT NULL;
-- build_metrics_summary: one row per build with its recorded metrics
-- pivoted into columns. The LEFT JOIN keeps builds that have no metrics
-- (their metric columns come back NULL); MAX(CASE ...) picks the single
-- value per metric name (unique per build via unique_build_metric_name).
CREATE VIEW build_metrics_summary AS
SELECT
b.id as build_id,
b.job_name,
b.status,
b.system,
e.jobset_id,
j.project_id,
b.started_at,
b.completed_at,
-- Wall-clock duration in seconds; NULL while the build is still running.
EXTRACT(
EPOCH
FROM
(b.completed_at - b.started_at)
) as duration_seconds,
MAX(
CASE
WHEN bm.metric_name = 'output_size_bytes' THEN bm.metric_value
END
) as output_size_bytes,
MAX(
CASE
WHEN bm.metric_name = 'peak_memory_bytes' THEN bm.metric_value
END
) as peak_memory_bytes,
MAX(
CASE
WHEN bm.metric_name = 'nar_size_bytes' THEN bm.metric_value
END
) as nar_size_bytes
FROM
builds b
JOIN evaluations e ON b.evaluation_id = e.id
JOIN jobsets j ON e.jobset_id = j.id
LEFT JOIN build_metrics bm ON b.id = bm.build_id
GROUP BY
b.id,
b.job_name,
b.status,
b.system,
e.jobset_id,
j.project_id,
b.started_at,
b.completed_at;

View file

@ -0,0 +1,5 @@
-- Example migration stub.
-- Replace this with real schema changes when needed.
-- Run: cargo run --bin fc-migrate -- create <name>
SELECT
1;

View file

@ -1,151 +0,0 @@
-- Initial schema for FC
-- Creates all core tables for the CI system
-- Enable UUID extension for UUID generation
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Projects: stores repository configurations
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(255) NOT NULL UNIQUE,
description TEXT,
repository_url TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Jobsets: Contains build configurations for each project
CREATE TABLE jobsets (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
nix_expression TEXT NOT NULL,
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, name)
);
-- Evaluations: Tracks Nix evaluation results for each jobset
CREATE TABLE evaluations (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
commit_hash VARCHAR(40) NOT NULL,
evaluation_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed')),
error_message TEXT,
UNIQUE(jobset_id, commit_hash)
);
-- Builds: Individual build jobs with their status
CREATE TABLE builds (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
evaluation_id UUID NOT NULL REFERENCES evaluations(id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
drv_path TEXT NOT NULL,
status TEXT NOT NULL CHECK (status IN ('pending', 'running', 'completed', 'failed', 'cancelled')),
started_at TIMESTAMP WITH TIME ZONE,
completed_at TIMESTAMP WITH TIME ZONE,
log_path TEXT,
build_output_path TEXT,
error_message TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(evaluation_id, job_name)
);
-- Build products: Stores output artifacts and metadata
CREATE TABLE build_products (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
path TEXT NOT NULL,
sha256_hash VARCHAR(64),
file_size BIGINT,
content_type VARCHAR(100),
is_directory BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Build steps: Detailed build execution logs and timing
CREATE TABLE build_steps (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
step_number INTEGER NOT NULL,
command TEXT NOT NULL,
output TEXT,
error_output TEXT,
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE,
exit_code INTEGER,
UNIQUE(build_id, step_number)
);
-- Projects indexes
CREATE INDEX idx_projects_name ON projects(name);
CREATE INDEX idx_projects_created_at ON projects(created_at);
-- Jobsets indexes
CREATE INDEX idx_jobsets_project_id ON jobsets(project_id);
CREATE INDEX idx_jobsets_enabled ON jobsets(enabled);
CREATE INDEX idx_jobsets_name ON jobsets(name);
-- Evaluations indexes
CREATE INDEX idx_evaluations_jobset_id ON evaluations(jobset_id);
CREATE INDEX idx_evaluations_commit_hash ON evaluations(commit_hash);
CREATE INDEX idx_evaluations_status ON evaluations(status);
CREATE INDEX idx_evaluations_evaluation_time ON evaluations(evaluation_time);
-- Builds indexes
CREATE INDEX idx_builds_evaluation_id ON builds(evaluation_id);
CREATE INDEX idx_builds_status ON builds(status);
CREATE INDEX idx_builds_job_name ON builds(job_name);
CREATE INDEX idx_builds_started_at ON builds(started_at);
CREATE INDEX idx_builds_completed_at ON builds(completed_at);
-- Build products indexes
CREATE INDEX idx_build_products_build_id ON build_products(build_id);
CREATE INDEX idx_build_products_name ON build_products(name);
-- Build steps indexes
CREATE INDEX idx_build_steps_build_id ON build_steps(build_id);
CREATE INDEX idx_build_steps_started_at ON build_steps(started_at);
-- Create trigger functions for updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ language 'plpgsql';
-- Create triggers for automatic updated_at updates
CREATE TRIGGER update_projects_updated_at
BEFORE UPDATE ON projects
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_jobsets_updated_at
BEFORE UPDATE ON jobsets
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
-- Create view for active jobsets (jobsets that are enabled and belong to active projects)
CREATE VIEW active_jobsets AS
SELECT
j.*,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.enabled = true;
-- Create view for build statistics
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_builds,
COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_builds,
COUNT(CASE WHEN status = 'running' THEN 1 END) as running_builds,
COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_builds,
AVG(EXTRACT(EPOCH FROM (completed_at - started_at))) as avg_duration_seconds
FROM builds
WHERE started_at IS NOT NULL;

View file

@ -1,2 +0,0 @@
-- Add system field to builds table
ALTER TABLE builds ADD COLUMN system VARCHAR(50);

View file

@ -1,92 +0,0 @@
-- Production features: auth, priority, retry, notifications, GC roots, log paths
-- API key authentication
CREATE TABLE api_keys (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(255) NOT NULL,
key_hash VARCHAR(128) NOT NULL UNIQUE,
role VARCHAR(50) NOT NULL DEFAULT 'admin'
CHECK (role IN ('admin', 'create-projects', 'restart-jobs', 'cancel-build', 'bump-to-front', 'eval-jobset', 'read-only')),
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- Build priority and retry support
ALTER TABLE builds ADD COLUMN priority INTEGER NOT NULL DEFAULT 0;
ALTER TABLE builds ADD COLUMN retry_count INTEGER NOT NULL DEFAULT 0;
ALTER TABLE builds ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 3;
ALTER TABLE builds ADD COLUMN notification_pending_since TIMESTAMP WITH TIME ZONE;
-- GC root tracking on build products
ALTER TABLE build_products ADD COLUMN gc_root_path TEXT;
-- Build log file path (filesystem path to captured log)
ALTER TABLE builds ADD COLUMN log_url TEXT;
-- Webhook configuration for incoming push events
CREATE TABLE webhook_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
forge_type VARCHAR(50) NOT NULL CHECK (forge_type IN ('github', 'gitea', 'forgejo', 'gitlab')),
secret_hash VARCHAR(128),
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, forge_type)
);
-- Notification configuration per project
CREATE TABLE notification_configs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
notification_type VARCHAR(50) NOT NULL
CHECK (notification_type IN ('github_status', 'gitea_status', 'forgejo_status', 'gitlab_status', 'run_command', 'email')),
config JSONB NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, notification_type)
);
-- Jobset inputs for multi-input support
CREATE TABLE jobset_inputs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
input_type VARCHAR(50) NOT NULL
CHECK (input_type IN ('git', 'string', 'boolean', 'path', 'build')),
value TEXT NOT NULL,
revision TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(jobset_id, name)
);
-- Track flake mode per jobset
ALTER TABLE jobsets ADD COLUMN flake_mode BOOLEAN NOT NULL DEFAULT true;
ALTER TABLE jobsets ADD COLUMN check_interval INTEGER NOT NULL DEFAULT 60;
-- Store the flake URI or legacy expression path in nix_expression (already exists)
-- For flake mode: nix_expression = "github:owner/repo" or "."
-- For legacy mode: nix_expression = "release.nix"
-- Indexes for new columns
CREATE INDEX idx_builds_priority ON builds(priority DESC, created_at ASC);
CREATE INDEX idx_builds_notification_pending ON builds(notification_pending_since) WHERE notification_pending_since IS NOT NULL;
CREATE INDEX idx_api_keys_key_hash ON api_keys(key_hash);
CREATE INDEX idx_webhook_configs_project ON webhook_configs(project_id);
CREATE INDEX idx_notification_configs_project ON notification_configs(project_id);
CREATE INDEX idx_jobset_inputs_jobset ON jobset_inputs(jobset_id);
-- Update active_jobsets view to include flake_mode
-- Must DROP first: adding columns to jobsets changes j.* expansion,
-- and CREATE OR REPLACE VIEW cannot rename existing columns.
DROP VIEW IF EXISTS active_jobsets;
CREATE VIEW active_jobsets AS
SELECT
j.*,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.enabled = true;
-- Update list_pending to respect priority ordering
-- (handled in application code, but index above supports it)

View file

@ -1,14 +0,0 @@
ALTER TABLE builds ADD COLUMN outputs JSONB;
ALTER TABLE builds ADD COLUMN is_aggregate BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE builds ADD COLUMN constituents JSONB;
CREATE TABLE build_dependencies (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
dependency_build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
UNIQUE(build_id, dependency_build_id)
);
CREATE INDEX idx_build_deps_build ON build_dependencies(build_id);
CREATE INDEX idx_build_deps_dep ON build_dependencies(dependency_build_id);
CREATE INDEX idx_builds_drv_path ON builds(drv_path);

View file

@ -1,44 +0,0 @@
-- Channels for release management (like Hydra channels)
-- A channel tracks the latest "good" evaluation for a jobset
CREATE TABLE channels (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
jobset_id UUID NOT NULL REFERENCES jobsets(id) ON DELETE CASCADE,
current_evaluation_id UUID REFERENCES evaluations(id),
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, name)
);
-- Remote builders for multi-machine / multi-arch builds
CREATE TABLE remote_builders (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL UNIQUE,
ssh_uri TEXT NOT NULL,
systems TEXT[] NOT NULL DEFAULT '{}',
max_jobs INTEGER NOT NULL DEFAULT 1,
speed_factor INTEGER NOT NULL DEFAULT 1,
supported_features TEXT[] NOT NULL DEFAULT '{}',
mandatory_features TEXT[] NOT NULL DEFAULT '{}',
enabled BOOLEAN NOT NULL DEFAULT true,
public_host_key TEXT,
ssh_key_file TEXT,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Track input hash for evaluation caching (skip re-eval when inputs unchanged)
ALTER TABLE evaluations ADD COLUMN inputs_hash VARCHAR(128);
-- Track which remote builder was used for a build
ALTER TABLE builds ADD COLUMN builder_id UUID REFERENCES remote_builders(id);
-- Track whether build outputs have been signed
ALTER TABLE builds ADD COLUMN signed BOOLEAN NOT NULL DEFAULT false;
-- Indexes
CREATE INDEX idx_channels_project ON channels(project_id);
CREATE INDEX idx_channels_jobset ON channels(jobset_id);
CREATE INDEX idx_remote_builders_enabled ON remote_builders(enabled) WHERE enabled = true;
CREATE INDEX idx_evaluations_inputs_hash ON evaluations(jobset_id, inputs_hash);
CREATE INDEX idx_builds_builder ON builds(builder_id) WHERE builder_id IS NOT NULL;

View file

@ -1,14 +0,0 @@
-- Hardening: indexes for performance
-- Cache lookup index (prefix match on path)
CREATE INDEX IF NOT EXISTS idx_build_products_path_prefix ON build_products (path text_pattern_ops);
-- Composite index for pending builds query
CREATE INDEX IF NOT EXISTS idx_builds_pending_priority ON builds (status, priority DESC, created_at ASC)
WHERE status = 'pending';
-- System filtering index
CREATE INDEX IF NOT EXISTS idx_builds_system ON builds(system) WHERE system IS NOT NULL;
-- Deduplication lookup by drv_path + status
CREATE INDEX IF NOT EXISTS idx_builds_drv_completed ON builds(drv_path) WHERE status = 'completed';

View file

@ -1,3 +0,0 @@
-- Multi-branch evaluation and scheduling shares
ALTER TABLE jobsets ADD COLUMN IF NOT EXISTS branch VARCHAR(255) DEFAULT NULL;
ALTER TABLE jobsets ADD COLUMN IF NOT EXISTS scheduling_shares INTEGER NOT NULL DEFAULT 100;

View file

@ -1,72 +0,0 @@
-- Migration 008: User Management Core
-- Adds user accounts, starred jobs, and project membership tables
-- User accounts for authentication and personalization
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
username VARCHAR(255) NOT NULL UNIQUE,
email VARCHAR(255) NOT NULL UNIQUE,
full_name VARCHAR(255),
password_hash VARCHAR(255), -- NULL for OAuth-only users
user_type VARCHAR(50) NOT NULL DEFAULT 'local', -- 'local', 'github', 'google'
role VARCHAR(50) NOT NULL DEFAULT 'read-only',
enabled BOOLEAN NOT NULL DEFAULT true,
email_verified BOOLEAN NOT NULL DEFAULT false,
public_dashboard BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_login_at TIMESTAMP WITH TIME ZONE
);
-- Link API keys to users for audit trail
ALTER TABLE api_keys ADD COLUMN user_id UUID REFERENCES users(id) ON DELETE SET NULL;
-- Starred jobs for personalized dashboard
CREATE TABLE starred_jobs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
jobset_id UUID REFERENCES jobsets(id) ON DELETE CASCADE,
job_name VARCHAR(255) NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(user_id, project_id, jobset_id, job_name)
);
-- User sessions for persistent authentication across restarts
CREATE TABLE user_sessions (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
session_token_hash VARCHAR(255) NOT NULL, -- Hashed session token
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
last_used_at TIMESTAMP WITH TIME ZONE
);
-- Project membership for per-project permissions
CREATE TABLE project_members (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
role VARCHAR(50) NOT NULL DEFAULT 'member', -- 'member', 'maintainer', 'admin'
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
UNIQUE(project_id, user_id)
);
-- Indexes for performance
CREATE INDEX idx_users_username ON users(username);
CREATE INDEX idx_users_email ON users(email);
CREATE INDEX idx_users_role ON users(role);
CREATE INDEX idx_users_enabled ON users(enabled);
CREATE INDEX idx_api_keys_user_id ON api_keys(user_id);
CREATE INDEX idx_starred_jobs_user_id ON starred_jobs(user_id);
CREATE INDEX idx_starred_jobs_project_id ON starred_jobs(project_id);
CREATE INDEX idx_user_sessions_token ON user_sessions(session_token_hash);
CREATE INDEX idx_user_sessions_user_id ON user_sessions(user_id);
CREATE INDEX idx_user_sessions_expires ON user_sessions(expires_at);
CREATE INDEX idx_project_members_project_id ON project_members(project_id);
CREATE INDEX idx_project_members_user_id ON project_members(user_id);
-- Trigger for updated_at on users
CREATE TRIGGER update_users_updated_at
BEFORE UPDATE ON users
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

View file

@ -1,2 +0,0 @@
-- Add index on builds.job_name for ILIKE queries in list_filtered
CREATE INDEX IF NOT EXISTS idx_builds_job_name ON builds (job_name);

View file

@ -1,12 +0,0 @@
-- Add pull request tracking to evaluations
-- This enables PR-based CI workflows for GitHub/GitLab/Gitea
-- Add PR-specific columns to evaluations table
ALTER TABLE evaluations ADD COLUMN pr_number INTEGER;
ALTER TABLE evaluations ADD COLUMN pr_head_branch TEXT;
ALTER TABLE evaluations ADD COLUMN pr_base_branch TEXT;
ALTER TABLE evaluations ADD COLUMN pr_action TEXT;
-- Index for efficient PR queries
CREATE INDEX idx_evaluations_pr ON evaluations(jobset_id, pr_number)
WHERE pr_number IS NOT NULL;

View file

@ -1,39 +0,0 @@
-- Migration: Add jobset states for Hydra-compatible scheduling
-- Supports 4 states: disabled, enabled, one_shot, one_at_a_time
-- Add state column with CHECK constraint
ALTER TABLE jobsets ADD COLUMN state VARCHAR(50) NOT NULL DEFAULT 'enabled'
CHECK (state IN ('disabled', 'enabled', 'one_shot', 'one_at_a_time'));
-- Migrate existing data based on enabled column
UPDATE jobsets SET state = CASE WHEN enabled THEN 'enabled' ELSE 'disabled' END;
-- Add last_checked_at for per-jobset interval tracking
ALTER TABLE jobsets ADD COLUMN last_checked_at TIMESTAMP WITH TIME ZONE;
-- Drop and recreate active_jobsets view to include new columns
DROP VIEW IF EXISTS active_jobsets;
CREATE VIEW active_jobsets AS
SELECT
j.id,
j.project_id,
j.name,
j.nix_expression,
j.enabled,
j.flake_mode,
j.check_interval,
j.branch,
j.scheduling_shares,
j.created_at,
j.updated_at,
j.state,
j.last_checked_at,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.state IN ('enabled', 'one_shot', 'one_at_a_time');
-- Indexes for efficient queries
CREATE INDEX idx_jobsets_state ON jobsets(state);
CREATE INDEX idx_jobsets_last_checked_at ON jobsets(last_checked_at);

View file

@ -1,45 +0,0 @@
-- Migration: Add build metrics collection
-- Stores timing, size, and performance metrics for builds
-- Create build_metrics table
CREATE TABLE build_metrics (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
build_id UUID NOT NULL REFERENCES builds(id) ON DELETE CASCADE,
metric_name VARCHAR(100) NOT NULL,
metric_value DOUBLE PRECISION NOT NULL,
unit VARCHAR(50) NOT NULL,
collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
-- Index for efficient lookups by build
CREATE INDEX idx_build_metrics_build_id ON build_metrics(build_id);
-- Index for time-based queries (alerting)
CREATE INDEX idx_build_metrics_collected_at ON build_metrics(collected_at);
-- Index for metric name filtering
CREATE INDEX idx_build_metrics_name ON build_metrics(metric_name);
-- Prevent duplicate metrics for same build+name
ALTER TABLE build_metrics ADD CONSTRAINT unique_build_metric_name UNIQUE (build_id, metric_name);
-- Create view for aggregate build statistics
CREATE VIEW build_metrics_summary AS
SELECT
b.id as build_id,
b.job_name,
b.status,
b.system,
e.jobset_id,
j.project_id,
b.started_at,
b.completed_at,
EXTRACT(EPOCH FROM (b.completed_at - b.started_at)) as duration_seconds,
MAX(CASE WHEN bm.metric_name = 'output_size_bytes' THEN bm.metric_value END) as output_size_bytes,
MAX(CASE WHEN bm.metric_name = 'peak_memory_bytes' THEN bm.metric_value END) as peak_memory_bytes,
MAX(CASE WHEN bm.metric_name = 'nar_size_bytes' THEN bm.metric_value END) as nar_size_bytes
FROM builds b
JOIN evaluations e ON b.evaluation_id = e.id
JOIN jobsets j ON e.jobset_id = j.id
LEFT JOIN build_metrics bm ON b.id = bm.build_id
GROUP BY b.id, b.job_name, b.status, b.system, e.jobset_id, j.project_id, b.started_at, b.completed_at;

View file

@ -1,26 +0,0 @@
-- Extended build status codes to match Hydra
-- Update the builds table CHECK constraint to include all new statuses
ALTER TABLE builds DROP CONSTRAINT builds_status_check;
ALTER TABLE builds ADD CONSTRAINT builds_status_check CHECK (
status IN (
'pending',
'running',
'succeeded',
'failed',
'dependency_failed',
'aborted',
'cancelled',
'failed_with_output',
'timeout',
'cached_failure',
'unsupported_system',
'log_limit_exceeded',
'nar_size_limit_exceeded',
'non_deterministic'
)
);
-- Add index on status for faster filtering
CREATE INDEX IF NOT EXISTS idx_builds_status ON builds(status);

View file

@ -1,17 +0,0 @@
-- Fix build_stats view and data after 'completed' -> 'succeeded' status rename
-- Migrate any existing builds still using the old status value
UPDATE builds SET status = 'succeeded' WHERE status = 'completed';
-- Recreate the build_stats view to reference the new status
DROP VIEW IF EXISTS build_stats;
CREATE VIEW build_stats AS
SELECT
COUNT(*) as total_builds,
COUNT(CASE WHEN status = 'succeeded' THEN 1 END) as completed_builds,
COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_builds,
COUNT(CASE WHEN status = 'running' THEN 1 END) as running_builds,
COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_builds,
AVG(EXTRACT(EPOCH FROM (completed_at - started_at))) as avg_duration_seconds
FROM builds
WHERE started_at IS NOT NULL;

View file

@ -1,61 +0,0 @@
-- PostgreSQL LISTEN/NOTIFY triggers for event-driven reactivity
-- Emits notifications on builds/jobsets mutations so daemons can wake immediately
-- Trigger function: notify on builds changes
CREATE OR REPLACE FUNCTION notify_builds_changed() RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_builds_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Trigger function: notify on jobsets changes
CREATE OR REPLACE FUNCTION notify_jobsets_changed() RETURNS trigger AS $$
BEGIN
PERFORM pg_notify('fc_jobsets_changed', json_build_object(
'op', TG_OP,
'table', TG_TABLE_NAME
)::text);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Builds: new build inserted (queue-runner should wake)
CREATE TRIGGER trg_builds_insert_notify
AFTER INSERT ON builds
FOR EACH ROW
EXECUTE FUNCTION notify_builds_changed();
-- Builds: status changed (queue-runner should re-check, e.g. deps resolved)
CREATE TRIGGER trg_builds_status_notify
AFTER UPDATE ON builds
FOR EACH ROW
WHEN (OLD.status IS DISTINCT FROM NEW.status)
EXECUTE FUNCTION notify_builds_changed();
-- Jobsets: new jobset created (evaluator should wake)
CREATE TRIGGER trg_jobsets_insert_notify
AFTER INSERT ON jobsets
FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed();
-- Jobsets: relevant fields changed (evaluator should re-check)
CREATE TRIGGER trg_jobsets_update_notify
AFTER UPDATE ON jobsets
FOR EACH ROW
WHEN (
OLD.enabled IS DISTINCT FROM NEW.enabled
OR OLD.state IS DISTINCT FROM NEW.state
OR OLD.nix_expression IS DISTINCT FROM NEW.nix_expression
OR OLD.check_interval IS DISTINCT FROM NEW.check_interval
)
EXECUTE FUNCTION notify_jobsets_changed();
-- Jobsets: deleted (evaluator should wake to stop tracking)
CREATE TRIGGER trg_jobsets_delete_notify
AFTER DELETE ON jobsets
FOR EACH ROW
EXECUTE FUNCTION notify_jobsets_changed();

View file

@ -1,9 +0,0 @@
-- Failed paths cache: prevents rebuilding known-failing derivations
CREATE TABLE failed_paths_cache (
drv_path TEXT PRIMARY KEY,
source_build_id UUID,
failure_status TEXT,
failed_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_failed_paths_cache_failed_at ON failed_paths_cache(failed_at);

View file

@ -1,32 +0,0 @@
-- GC pinning (#11)
ALTER TABLE builds ADD COLUMN IF NOT EXISTS keep BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE jobsets ADD COLUMN IF NOT EXISTS keep_nr INTEGER NOT NULL DEFAULT 3;
-- Recreate active_jobsets view to include keep_nr
DROP VIEW IF EXISTS active_jobsets;
CREATE VIEW active_jobsets AS
SELECT
j.id,
j.project_id,
j.name,
j.nix_expression,
j.enabled,
j.flake_mode,
j.check_interval,
j.branch,
j.scheduling_shares,
j.created_at,
j.updated_at,
j.state,
j.last_checked_at,
j.keep_nr,
p.name as project_name,
p.repository_url
FROM jobsets j
JOIN projects p ON j.project_id = p.id
WHERE j.state IN ('enabled', 'one_shot', 'one_at_a_time');
-- Machine health tracking (#5)
ALTER TABLE remote_builders ADD COLUMN IF NOT EXISTS consecutive_failures INTEGER NOT NULL DEFAULT 0;
ALTER TABLE remote_builders ADD COLUMN IF NOT EXISTS disabled_until TIMESTAMP WITH TIME ZONE;
ALTER TABLE remote_builders ADD COLUMN IF NOT EXISTS last_failure TIMESTAMP WITH TIME ZONE;

View file

@ -4,8 +4,9 @@ This directory contains SQL migrations for the FC database.
## Migration Files ## Migration Files
- `001_initial_schema.sql`: Creates the core database schema including projects, - `0001_schema.sql`: Full schema, all tables, indexes, triggers, and views.
jobsets, evaluations, builds, and related tables. - `0002_example.sql`: Example stub for the next migration when we make a stable
release.
## Running Migrations ## Running Migrations
@ -22,5 +23,3 @@ fc-migrate validate postgresql://user:password@localhost/fc_ci
# Create a new migration # Create a new migration
fc-migrate create migration_name fc-migrate create migration_name
``` ```
TODO: add or generate schema overviews

View file

@ -30,11 +30,13 @@ impl std::fmt::Debug for AlertManager {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AlertManager") f.debug_struct("AlertManager")
.field("config", &self.config) .field("config", &self.config)
.finish() .finish_non_exhaustive()
} }
} }
impl AlertManager { impl AlertManager {
/// Create an alert manager from config.
#[must_use]
pub fn new(config: AlertConfig) -> Self { pub fn new(config: AlertConfig) -> Self {
Self { Self {
config, config,
@ -42,10 +44,14 @@ impl AlertManager {
} }
} }
pub fn is_enabled(&self) -> bool { /// Check if alerts are enabled in the config.
#[must_use]
pub const fn is_enabled(&self) -> bool {
self.config.enabled self.config.enabled
} }
/// Calculate failure rate and dispatch alerts if threshold exceeded.
/// Returns the computed failure rate if alerts are enabled.
pub async fn check_and_alert( pub async fn check_and_alert(
&self, &self,
pool: &PgPool, pool: &PgPool,
@ -56,16 +62,15 @@ impl AlertManager {
return None; return None;
} }
let failure_rate = match build_metrics::calculate_failure_rate( let Ok(failure_rate) = build_metrics::calculate_failure_rate(
pool, pool,
project_id, project_id,
jobset_id, jobset_id,
self.config.time_window_minutes, self.config.time_window_minutes,
) )
.await .await
{ else {
Ok(rate) => rate, return None;
Err(_) => return None,
}; };
if failure_rate > self.config.error_threshold { if failure_rate > self.config.error_threshold {
@ -74,6 +79,7 @@ impl AlertManager {
if time_since_last >= self.config.time_window_minutes { if time_since_last >= self.config.time_window_minutes {
state.last_alert_at = Utc::now(); state.last_alert_at = Utc::now();
drop(state);
info!( info!(
"Alert: failure rate {:.1}% exceeds threshold {:.1}%", "Alert: failure rate {:.1}% exceeds threshold {:.1}%",
failure_rate, self.config.error_threshold failure_rate, self.config.error_threshold

View file

@ -21,11 +21,10 @@ use crate::{
/// Supports ${VAR}, $VAR, and ~ for home directory. /// Supports ${VAR}, $VAR, and ~ for home directory.
fn expand_path(path: &str) -> String { fn expand_path(path: &str) -> String {
let expanded = if path.starts_with('~') { let expanded = if path.starts_with('~') {
if let Some(home) = std::env::var_os("HOME") { std::env::var_os("HOME").map_or_else(
path.replacen('~', &home.to_string_lossy(), 1) || path.to_string(),
} else { |home| path.replacen('~', &home.to_string_lossy(), 1),
path.to_string() )
}
} else { } else {
path.to_string() path.to_string()
}; };
@ -51,9 +50,9 @@ fn expand_path(path: &str) -> String {
/// Resolve secret for a webhook from inline value or file. /// Resolve secret for a webhook from inline value or file.
fn resolve_webhook_secret(webhook: &DeclarativeWebhook) -> Option<String> { fn resolve_webhook_secret(webhook: &DeclarativeWebhook) -> Option<String> {
if let Some(ref secret) = webhook.secret { webhook.secret.as_ref().map_or_else(
Some(secret.clone()) || {
} else if let Some(ref file) = webhook.secret_file { webhook.secret_file.as_ref().and_then(|file| {
let expanded = expand_path(file); let expanded = expand_path(file);
match std::fs::read_to_string(&expanded) { match std::fs::read_to_string(&expanded) {
Ok(s) => Some(s.trim().to_string()), Ok(s) => Some(s.trim().to_string()),
@ -66,9 +65,10 @@ fn resolve_webhook_secret(webhook: &DeclarativeWebhook) -> Option<String> {
None None
}, },
} }
} else { })
None },
} |secret| Some(secret.clone()),
)
} }
/// Bootstrap declarative configuration into the database. /// Bootstrap declarative configuration into the database.
@ -76,6 +76,10 @@ fn resolve_webhook_secret(webhook: &DeclarativeWebhook) -> Option<String> {
/// This function is idempotent: running it multiple times with the same config /// This function is idempotent: running it multiple times with the same config
/// produces the same database state. It upserts (insert or update) all /// produces the same database state. It upserts (insert or update) all
/// configured projects, jobsets, API keys, and users. /// configured projects, jobsets, API keys, and users.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> { pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> {
if config.projects.is_empty() if config.projects.is_empty()
&& config.api_keys.is_empty() && config.api_keys.is_empty()
@ -120,10 +124,10 @@ pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> {
let state = decl_jobset.state.as_ref().map(|s| { let state = decl_jobset.state.as_ref().map(|s| {
match s.as_str() { match s.as_str() {
"disabled" => JobsetState::Disabled, "disabled" => JobsetState::Disabled,
"enabled" => JobsetState::Enabled,
"one_shot" => JobsetState::OneShot, "one_shot" => JobsetState::OneShot,
"one_at_a_time" => JobsetState::OneAtATime, "one_at_a_time" => JobsetState::OneAtATime,
_ => JobsetState::Enabled, // Default to enabled for unknown values _ => JobsetState::Enabled, /* Default to enabled for "enabled" or
* unknown values */
} }
}); });
@ -239,9 +243,9 @@ pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> {
// Upsert users // Upsert users
for decl_user in &config.users { for decl_user in &config.users {
// Resolve password from inline or file // Resolve password from inline or file
let password = if let Some(ref p) = decl_user.password { let password = decl_user.password.as_ref().map_or_else(
Some(p.clone()) || {
} else if let Some(ref file) = decl_user.password_file { decl_user.password_file.as_ref().and_then(|file| {
let expanded = expand_path(file); let expanded = expand_path(file);
match std::fs::read_to_string(&expanded) { match std::fs::read_to_string(&expanded) {
Ok(p) => Some(p.trim().to_string()), Ok(p) => Some(p.trim().to_string()),
@ -254,9 +258,10 @@ pub async fn run(pool: &PgPool, config: &DeclarativeConfig) -> Result<()> {
None None
}, },
} }
} else { })
None },
}; |p| Some(p.clone()),
);
// Check if user exists // Check if user exists
let existing = let existing =

View file

@ -50,6 +50,9 @@ pub struct ServerConfig {
/// Allowed URL schemes for repository URLs. Insecure schemes emit a warning /// Allowed URL schemes for repository URLs. Insecure schemes emit a warning
/// on startup /// on startup
pub allowed_url_schemes: Vec<String>, pub allowed_url_schemes: Vec<String>,
/// Force Secure flag on session cookies (enable when behind HTTPS reverse
/// proxy)
pub force_secure_cookies: bool,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@ -131,7 +134,7 @@ impl std::fmt::Debug for GitHubOAuthConfig {
#[serde(default)] #[serde(default)]
#[derive(Default)] #[derive(Default)]
pub struct NotificationsConfig { pub struct NotificationsConfig {
pub run_command: Option<String>, pub webhook_url: Option<String>,
pub github_token: Option<String>, pub github_token: Option<String>,
pub gitea_url: Option<String>, pub gitea_url: Option<String>,
pub gitea_token: Option<String>, pub gitea_token: Option<String>,
@ -139,6 +142,18 @@ pub struct NotificationsConfig {
pub gitlab_token: Option<String>, pub gitlab_token: Option<String>,
pub email: Option<EmailConfig>, pub email: Option<EmailConfig>,
pub alerts: Option<AlertConfig>, pub alerts: Option<AlertConfig>,
/// Enable notification retry queue (persistent, with exponential backoff)
#[serde(default = "default_true")]
pub enable_retry_queue: bool,
/// Maximum retry attempts per notification (default 5)
#[serde(default = "default_notification_max_attempts")]
pub max_retry_attempts: i32,
/// Retention period for old completed/failed tasks in days (default 7)
#[serde(default = "default_notification_retention_days")]
pub retention_days: i64,
/// Polling interval for retry worker in seconds (default 5)
#[serde(default = "default_notification_poll_interval")]
pub retry_poll_interval: u64,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@ -187,16 +202,18 @@ pub struct SigningConfig {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)] #[serde(default)]
#[derive(Default)]
pub struct CacheUploadConfig { pub struct CacheUploadConfig {
pub enabled: bool, pub enabled: bool,
pub store_uri: Option<String>, pub store_uri: Option<String>,
/// S3-specific configuration (used when store_uri starts with s3://) /// S3-specific configuration (used when `store_uri` starts with s3://)
pub s3: Option<S3CacheConfig>, pub s3: Option<S3CacheConfig>,
} }
/// S3-specific cache configuration. /// S3-specific cache configuration.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)] #[serde(default)]
#[derive(Default)]
pub struct S3CacheConfig { pub struct S3CacheConfig {
/// AWS region (e.g., "us-east-1") /// AWS region (e.g., "us-east-1")
pub region: Option<String>, pub region: Option<String>,
@ -208,36 +225,12 @@ pub struct S3CacheConfig {
pub secret_access_key: Option<String>, pub secret_access_key: Option<String>,
/// Session token for temporary credentials (optional) /// Session token for temporary credentials (optional)
pub session_token: Option<String>, pub session_token: Option<String>,
/// Endpoint URL for S3-compatible services (e.g., MinIO) /// Endpoint URL for S3-compatible services (e.g., `MinIO`)
pub endpoint_url: Option<String>, pub endpoint_url: Option<String>,
/// Whether to use path-style addressing (for MinIO compatibility) /// Whether to use path-style addressing (for `MinIO` compatibility)
pub use_path_style: bool, pub use_path_style: bool,
} }
impl Default for S3CacheConfig {
fn default() -> Self {
Self {
region: None,
prefix: None,
access_key_id: None,
secret_access_key: None,
session_token: None,
endpoint_url: None,
use_path_style: false,
}
}
}
impl Default for CacheUploadConfig {
fn default() -> Self {
Self {
enabled: false,
store_uri: None,
s3: None,
}
}
}
/// Declarative project/jobset/api-key/user definitions. /// Declarative project/jobset/api-key/user definitions.
/// These are upserted on server startup, enabling fully declarative operation. /// These are upserted on server startup, enabling fully declarative operation.
#[derive(Debug, Clone, Serialize, Deserialize, Default)] #[derive(Debug, Clone, Serialize, Deserialize, Default)]
@ -304,8 +297,8 @@ pub struct DeclarativeProject {
/// Declarative notification configuration. /// Declarative notification configuration.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeclarativeNotification { pub struct DeclarativeNotification {
/// Notification type: `github_status`, email, `gitlab_status`, /// Notification type: `github_status`, `email`, `gitlab_status`,
/// `gitea_status`, `run_command` /// `gitea_status`, `webhook`
pub notification_type: String, pub notification_type: String,
/// Type-specific configuration (JSON object) /// Type-specific configuration (JSON object)
pub config: serde_json::Value, pub config: serde_json::Value,
@ -431,6 +424,18 @@ fn default_role() -> String {
"read-only".to_string() "read-only".to_string()
} }
const fn default_notification_max_attempts() -> i32 {
5
}
const fn default_notification_retention_days() -> i64 {
7
}
const fn default_notification_poll_interval() -> u64 {
5
}
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)] #[serde(default)]
pub struct TracingConfig { pub struct TracingConfig {
@ -466,6 +471,11 @@ impl Default for DatabaseConfig {
} }
impl DatabaseConfig { impl DatabaseConfig {
/// Validate database configuration.
///
/// # Errors
///
/// Returns error if configuration is invalid.
pub fn validate(&self) -> anyhow::Result<()> { pub fn validate(&self) -> anyhow::Result<()> {
if self.url.is_empty() { if self.url.is_empty() {
return Err(anyhow::anyhow!("Database URL cannot be empty")); return Err(anyhow::anyhow!("Database URL cannot be empty"));
@ -513,6 +523,7 @@ impl Default for ServerConfig {
"git".into(), "git".into(),
"ssh".into(), "ssh".into(),
], ],
force_secure_cookies: false,
} }
} }
} }
@ -578,6 +589,11 @@ impl Default for CacheConfig {
} }
impl Config { impl Config {
/// Load configuration from file and environment variables.
///
/// # Errors
///
/// Returns error if configuration loading or validation fails.
pub fn load() -> anyhow::Result<Self> { pub fn load() -> anyhow::Result<Self> {
let mut settings = config_crate::Config::builder(); let mut settings = config_crate::Config::builder();
@ -611,6 +627,11 @@ impl Config {
Ok(config) Ok(config)
} }
/// Validate all configuration sections.
///
/// # Errors
///
/// Returns error if any configuration section is invalid.
pub fn validate(&self) -> anyhow::Result<()> { pub fn validate(&self) -> anyhow::Result<()> {
// Validate database URL // Validate database URL
if self.database.url.is_empty() { if self.database.url.is_empty() {

View file

@ -12,6 +12,11 @@ pub struct Database {
} }
impl Database { impl Database {
/// Create a new database connection pool from config.
///
/// # Errors
///
/// Returns error if connection fails or health check fails.
pub async fn new(config: DatabaseConfig) -> anyhow::Result<Self> { pub async fn new(config: DatabaseConfig) -> anyhow::Result<Self> {
info!("Initializing database connection pool"); info!("Initializing database connection pool");
@ -32,11 +37,17 @@ impl Database {
Ok(Self { pool }) Ok(Self { pool })
} }
/// Get a reference to the underlying connection pool.
#[must_use] #[must_use]
pub const fn pool(&self) -> &PgPool { pub const fn pool(&self) -> &PgPool {
&self.pool &self.pool
} }
/// Run a simple query to verify the database is reachable.
///
/// # Errors
///
/// Returns error if query fails or returns unexpected result.
pub async fn health_check(pool: &PgPool) -> anyhow::Result<()> { pub async fn health_check(pool: &PgPool) -> anyhow::Result<()> {
debug!("Performing database health check"); debug!("Performing database health check");
@ -52,11 +63,17 @@ impl Database {
Ok(()) Ok(())
} }
/// Close the connection pool gracefully.
pub async fn close(&self) { pub async fn close(&self) {
info!("Closing database connection pool"); info!("Closing database connection pool");
self.pool.close().await; self.pool.close().await;
} }
/// Query database metadata (version, user, address).
///
/// # Errors
///
/// Returns error if query fails.
pub async fn get_connection_info(&self) -> anyhow::Result<ConnectionInfo> { pub async fn get_connection_info(&self) -> anyhow::Result<ConnectionInfo> {
let row = sqlx::query( let row = sqlx::query(
r" r"
@ -80,7 +97,9 @@ impl Database {
}) })
} }
pub async fn get_pool_stats(&self) -> PoolStats { /// Get current connection pool statistics (size, idle, active).
#[must_use]
pub fn get_pool_stats(&self) -> PoolStats {
let pool = &self.pool; let pool = &self.pool;
PoolStats { PoolStats {

View file

@ -51,6 +51,7 @@ pub enum CiError {
} }
impl CiError { impl CiError {
/// Check if this error indicates a disk-full condition.
#[must_use] #[must_use]
pub fn is_disk_full(&self) -> bool { pub fn is_disk_full(&self) -> bool {
let msg = self.to_string().to_lowercase(); let msg = self.to_string().to_lowercase();
@ -65,6 +66,10 @@ impl CiError {
pub type Result<T> = std::result::Result<T, CiError>; pub type Result<T> = std::result::Result<T, CiError>;
/// Check disk space on the given path /// Check disk space on the given path
///
/// # Errors
///
/// Returns error if statfs call fails or path is invalid.
pub fn check_disk_space(path: &std::path::Path) -> Result<DiskSpaceInfo> { pub fn check_disk_space(path: &std::path::Path) -> Result<DiskSpaceInfo> {
fn to_gb(bytes: u64) -> f64 { fn to_gb(bytes: u64) -> f64 {
bytes as f64 / 1024.0 / 1024.0 / 1024.0 bytes as f64 / 1024.0 / 1024.0 / 1024.0
@ -83,9 +88,9 @@ pub fn check_disk_space(path: &std::path::Path) -> Result<DiskSpaceInfo> {
return Err(CiError::Io(std::io::Error::last_os_error())); return Err(CiError::Io(std::io::Error::last_os_error()));
} }
let bavail = statfs.f_bavail * (statfs.f_bsize as u64); let bavail = statfs.f_bavail * statfs.f_bsize.cast_unsigned();
let bfree = statfs.f_bfree * (statfs.f_bsize as u64); let bfree = statfs.f_bfree * statfs.f_bsize.cast_unsigned();
let btotal = statfs.f_blocks * (statfs.f_bsize as u64); let btotal = statfs.f_blocks * statfs.f_bsize.cast_unsigned();
Ok(DiskSpaceInfo { Ok(DiskSpaceInfo {
total_gb: to_gb(btotal), total_gb: to_gb(btotal),

View file

@ -13,6 +13,10 @@ use uuid::Uuid;
/// Remove GC root symlinks with mtime older than `max_age`. Returns count /// Remove GC root symlinks with mtime older than `max_age`. Returns count
/// removed. Symlinks whose filename matches a UUID in `pinned_build_ids` are /// removed. Symlinks whose filename matches a UUID in `pinned_build_ids` are
/// skipped regardless of age. /// skipped regardless of age.
///
/// # Errors
///
/// Returns error if directory read fails.
pub fn cleanup_old_roots( pub fn cleanup_old_roots(
roots_dir: &Path, roots_dir: &Path,
max_age: Duration, max_age: Duration,
@ -29,23 +33,20 @@ pub fn cleanup_old_roots(
let entry = entry?; let entry = entry?;
// Check if this root is pinned (filename is a build UUID with keep=true) // Check if this root is pinned (filename is a build UUID with keep=true)
if let Some(name) = entry.file_name().to_str() { if let Some(name) = entry.file_name().to_str()
if let Ok(build_id) = name.parse::<Uuid>() { && let Ok(build_id) = name.parse::<Uuid>()
if pinned_build_ids.contains(&build_id) { && pinned_build_ids.contains(&build_id)
{
debug!(build_id = %build_id, "Skipping pinned GC root"); debug!(build_id = %build_id, "Skipping pinned GC root");
continue; continue;
} }
}
}
let metadata = match entry.metadata() { let Ok(metadata) = entry.metadata() else {
Ok(m) => m, continue;
Err(_) => continue,
}; };
let modified = match metadata.modified() { let Ok(modified) = metadata.modified() else {
Ok(t) => t, continue;
Err(_) => continue,
}; };
if let Ok(age) = now.duration_since(modified) if let Ok(age) = now.duration_since(modified)
@ -71,6 +72,11 @@ pub struct GcRoots {
} }
impl GcRoots { impl GcRoots {
/// Create a GC roots manager. Creates the directory if enabled.
///
/// # Errors
///
/// Returns error if directory creation or permission setting fails.
pub fn new(roots_dir: PathBuf, enabled: bool) -> std::io::Result<Self> { pub fn new(roots_dir: PathBuf, enabled: bool) -> std::io::Result<Self> {
if enabled { if enabled {
std::fs::create_dir_all(&roots_dir)?; std::fs::create_dir_all(&roots_dir)?;
@ -87,6 +93,10 @@ impl GcRoots {
} }
/// Register a GC root for a build output. Returns the symlink path. /// Register a GC root for a build output. Returns the symlink path.
///
/// # Errors
///
/// Returns error if path is invalid or symlink creation fails.
pub fn register( pub fn register(
&self, &self,
build_id: &uuid::Uuid, build_id: &uuid::Uuid,

View file

@ -9,6 +9,11 @@ pub struct LogStorage {
} }
impl LogStorage { impl LogStorage {
/// Create a log storage instance. Creates the directory if needed.
///
/// # Errors
///
/// Returns error if directory creation fails.
pub fn new(log_dir: PathBuf) -> std::io::Result<Self> { pub fn new(log_dir: PathBuf) -> std::io::Result<Self> {
std::fs::create_dir_all(&log_dir)?; std::fs::create_dir_all(&log_dir)?;
Ok(Self { log_dir }) Ok(Self { log_dir })
@ -27,6 +32,10 @@ impl LogStorage {
} }
/// Write build log content to file /// Write build log content to file
///
/// # Errors
///
/// Returns error if file write fails.
pub fn write_log( pub fn write_log(
&self, &self,
build_id: &Uuid, build_id: &Uuid,
@ -50,6 +59,10 @@ impl LogStorage {
} }
/// Read a build log from disk. Returns None if the file doesn't exist. /// Read a build log from disk. Returns None if the file doesn't exist.
///
/// # Errors
///
/// Returns error if file read fails.
pub fn read_log(&self, build_id: &Uuid) -> std::io::Result<Option<String>> { pub fn read_log(&self, build_id: &Uuid) -> std::io::Result<Option<String>> {
let path = self.log_path(build_id); let path = self.log_path(build_id);
if !path.exists() { if !path.exists() {
@ -60,6 +73,10 @@ impl LogStorage {
} }
/// Delete a build log /// Delete a build log
///
/// # Errors
///
/// Returns error if file deletion fails.
pub fn delete_log(&self, build_id: &Uuid) -> std::io::Result<()> { pub fn delete_log(&self, build_id: &Uuid) -> std::io::Result<()> {
let path = self.log_path(build_id); let path = self.log_path(build_id);
if path.exists() { if path.exists() {

View file

@ -4,6 +4,10 @@ use sqlx::{PgPool, Postgres, migrate::MigrateDatabase};
use tracing::{error, info, warn}; use tracing::{error, info, warn};
/// Runs database migrations and ensures the database exists /// Runs database migrations and ensures the database exists
///
/// # Errors
///
/// Returns error if database operations or migrations fail.
pub async fn run_migrations(database_url: &str) -> anyhow::Result<()> { pub async fn run_migrations(database_url: &str) -> anyhow::Result<()> {
info!("Starting database migrations"); info!("Starting database migrations");
@ -39,6 +43,10 @@ async fn create_connection_pool(database_url: &str) -> anyhow::Result<PgPool> {
} }
/// Validates that all required tables exist and have the expected structure /// Validates that all required tables exist and have the expected structure
///
/// # Errors
///
/// Returns error if schema validation fails or required tables are missing.
pub async fn validate_schema(pool: &PgPool) -> anyhow::Result<()> { pub async fn validate_schema(pool: &PgPool) -> anyhow::Result<()> {
info!("Validating database schema"); info!("Validating database schema");

View file

@ -32,6 +32,11 @@ pub enum Commands {
}, },
} }
/// Execute the CLI command.
///
/// # Errors
///
/// Returns error if command execution fails.
pub async fn run() -> anyhow::Result<()> { pub async fn run() -> anyhow::Result<()> {
let cli = Cli::parse(); let cli = Cli::parse();

View file

@ -147,20 +147,23 @@ pub enum BuildStatus {
impl BuildStatus { impl BuildStatus {
/// Returns true if the build has completed (not pending or running). /// Returns true if the build has completed (not pending or running).
pub fn is_finished(&self) -> bool { #[must_use]
pub const fn is_finished(&self) -> bool {
!matches!(self, Self::Pending | Self::Running) !matches!(self, Self::Pending | Self::Running)
} }
/// Returns true if the build succeeded. /// Returns true if the build succeeded.
/// Note: Does NOT include CachedFailure - a cached failure is still a /// Note: Does NOT include `CachedFailure` - a cached failure is still a
/// failure. /// failure.
pub fn is_success(&self) -> bool { #[must_use]
pub const fn is_success(&self) -> bool {
matches!(self, Self::Succeeded) matches!(self, Self::Succeeded)
} }
/// Returns true if the build completed without needing a retry. /// Returns true if the build completed without needing a retry.
/// This includes both successful builds and cached failures. /// This includes both successful builds and cached failures.
pub fn is_terminal(&self) -> bool { #[must_use]
pub const fn is_terminal(&self) -> bool {
matches!( matches!(
self, self,
Self::Succeeded Self::Succeeded
@ -180,7 +183,8 @@ impl BuildStatus {
/// Returns the database integer representation of this status. /// Returns the database integer representation of this status.
/// Note: This uses an internal numbering scheme (0-13), not Hydra exit codes. /// Note: This uses an internal numbering scheme (0-13), not Hydra exit codes.
pub fn as_i32(&self) -> i32 { #[must_use]
pub const fn as_i32(&self) -> i32 {
match self { match self {
Self::Pending => 0, Self::Pending => 0,
Self::Running => 1, Self::Running => 1,
@ -199,9 +203,10 @@ impl BuildStatus {
} }
} }
/// Converts a database integer to BuildStatus. /// Converts a database integer to `BuildStatus`.
/// This is the inverse of as_i32() for reading from the database. /// This is the inverse of `as_i32()` for reading from the database.
pub fn from_i32(code: i32) -> Option<Self> { #[must_use]
pub const fn from_i32(code: i32) -> Option<Self> {
match code { match code {
0 => Some(Self::Pending), 0 => Some(Self::Pending),
1 => Some(Self::Running), 1 => Some(Self::Running),
@ -221,17 +226,17 @@ impl BuildStatus {
} }
} }
/// Converts a Hydra-compatible exit code to a BuildStatus. /// Converts a Hydra-compatible exit code to a `BuildStatus`.
/// Note: These codes follow Hydra's conventions and differ from /// Note: These codes follow Hydra's conventions and differ from
/// as_i32/from_i32. /// `as_i32/from_i32`.
pub fn from_exit_code(exit_code: i32) -> Self { #[must_use]
pub const fn from_exit_code(exit_code: i32) -> Self {
match exit_code { match exit_code {
0 => Self::Succeeded, 0 => Self::Succeeded,
1 => Self::Failed, 1 => Self::Failed,
2 => Self::DependencyFailed, 2 => Self::DependencyFailed,
3 => Self::Aborted, 3 | 5 => Self::Aborted, // 5 is obsolete in Hydra, treat as aborted
4 => Self::Cancelled, 4 => Self::Cancelled,
5 => Self::Aborted, // Obsolete in Hydra, treat as aborted
6 => Self::FailedWithOutput, 6 => Self::FailedWithOutput,
7 => Self::Timeout, 7 => Self::Timeout,
8 => Self::CachedFailure, 8 => Self::CachedFailure,
@ -262,7 +267,7 @@ impl std::fmt::Display for BuildStatus {
Self::NarSizeLimitExceeded => "nar size limit exceeded", Self::NarSizeLimitExceeded => "nar size limit exceeded",
Self::NonDeterministic => "non-deterministic", Self::NonDeterministic => "non-deterministic",
}; };
write!(f, "{}", s) write!(f, "{s}")
} }
} }
@ -320,7 +325,7 @@ pub mod metric_units {
pub const BYTES: &str = "bytes"; pub const BYTES: &str = "bytes";
} }
/// Active jobset view — enabled jobsets joined with project info. /// Active jobsets joined with project info.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] #[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct ActiveJobset { pub struct ActiveJobset {
pub id: Uuid, pub id: Uuid,
@ -398,7 +403,7 @@ pub struct JobsetInput {
pub created_at: DateTime<Utc>, pub created_at: DateTime<Utc>,
} }
/// Release channel — tracks the latest "good" evaluation for a jobset. /// Tracks the latest "good" evaluation for a jobset.
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] #[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Channel { pub struct Channel {
pub id: Uuid, pub id: Uuid,
@ -430,6 +435,21 @@ pub struct RemoteBuilder {
pub last_failure: Option<DateTime<Utc>>, pub last_failure: Option<DateTime<Utc>>,
} }
/// Parameters for creating or updating a remote builder.
#[derive(Debug, Clone)]
pub struct RemoteBuilderParams<'a> {
pub name: &'a str,
pub ssh_uri: &'a str,
pub systems: &'a [String],
pub max_jobs: i32,
pub speed_factor: i32,
pub supported_features: &'a [String],
pub mandatory_features: &'a [String],
pub enabled: bool,
pub public_host_key: Option<&'a str>,
pub ssh_key_file: Option<&'a str>,
}
/// User account for authentication and personalization /// User account for authentication and personalization
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] #[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct User { pub struct User {
@ -488,6 +508,33 @@ pub struct UserSession {
pub last_used_at: Option<DateTime<Utc>>, pub last_used_at: Option<DateTime<Utc>>,
} }
/// Notification task for reliable delivery with retry
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct NotificationTask {
pub id: Uuid,
pub notification_type: String,
pub payload: serde_json::Value,
pub status: NotificationTaskStatus,
pub attempts: i32,
pub max_attempts: i32,
pub next_retry_at: DateTime<Utc>,
pub last_error: Option<String>,
pub created_at: DateTime<Utc>,
pub completed_at: Option<DateTime<Utc>>,
}
#[derive(
Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type,
)]
#[serde(rename_all = "lowercase")]
#[sqlx(type_name = "varchar", rename_all = "lowercase")]
pub enum NotificationTaskStatus {
Pending,
Running,
Completed,
Failed,
}
// Pagination // Pagination
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]

View file

@ -84,6 +84,10 @@ fn to_flake_ref(url: &str) -> String {
} }
/// Probe a flake repository to discover its outputs and suggest jobsets. /// Probe a flake repository to discover its outputs and suggest jobsets.
///
/// # Errors
///
/// Returns error if nix flake show command fails or times out.
pub async fn probe_flake( pub async fn probe_flake(
repo_url: &str, repo_url: &str,
revision: Option<&str>, revision: Option<&str>,
@ -157,13 +161,10 @@ pub async fn probe_flake(
CiError::NixEval(format!("Failed to parse flake show output: {e}")) CiError::NixEval(format!("Failed to parse flake show output: {e}"))
})?; })?;
let top = match raw.as_object() { let Some(top) = raw.as_object() else {
Some(obj) => obj,
None => {
return Err(CiError::NixEval( return Err(CiError::NixEval(
"Unexpected flake show output format".to_string(), "Unexpected flake show output format".to_string(),
)); ));
},
}; };
let mut outputs = Vec::new(); let mut outputs = Vec::new();
@ -220,7 +221,7 @@ pub async fn probe_flake(
} }
// Sort jobsets by priority (highest first) // Sort jobsets by priority (highest first)
suggested_jobsets.sort_by(|a, b| b.priority.cmp(&a.priority)); suggested_jobsets.sort_by_key(|j| std::cmp::Reverse(j.priority));
// Extract metadata from the flake // Extract metadata from the flake
let metadata = FlakeMetadata { let metadata = FlakeMetadata {
@ -441,7 +442,7 @@ mod tests {
}, },
]; ];
jobsets.sort_by(|a, b| b.priority.cmp(&a.priority)); jobsets.sort_by_key(|j| std::cmp::Reverse(j.priority));
assert_eq!(jobsets[0].name, "hydraJobs"); assert_eq!(jobsets[0].name, "hydraJobs");
assert_eq!(jobsets[1].name, "checks"); assert_eq!(jobsets[1].name, "checks");
assert_eq!(jobsets[2].name, "packages"); assert_eq!(jobsets[2].name, "packages");

View file

@ -2,11 +2,13 @@
use std::sync::OnceLock; use std::sync::OnceLock;
use sqlx::PgPool;
use tracing::{error, info, warn}; use tracing::{error, info, warn};
use crate::{ use crate::{
config::{EmailConfig, NotificationsConfig}, config::{EmailConfig, NotificationsConfig},
models::{Build, BuildStatus, Project}, models::{Build, BuildStatus, Project},
repo,
}; };
/// Shared HTTP client for all notification dispatches. /// Shared HTTP client for all notification dispatches.
@ -17,15 +19,170 @@ fn http_client() -> &'static reqwest::Client {
} }
/// Dispatch all configured notifications for a completed build. /// Dispatch all configured notifications for a completed build.
/// If retry queue is enabled, enqueues tasks; otherwise sends immediately.
pub async fn dispatch_build_finished( pub async fn dispatch_build_finished(
pool: Option<&PgPool>,
build: &Build, build: &Build,
project: &Project, project: &Project,
commit_hash: &str, commit_hash: &str,
config: &NotificationsConfig, config: &NotificationsConfig,
) { ) {
// 1. Run command notification // If retry queue is enabled and pool is available, enqueue tasks
if let Some(ref cmd) = config.run_command { if config.enable_retry_queue
run_command_notification(cmd, build, project).await; && let Some(pool) = pool
{
enqueue_notifications(pool, build, project, commit_hash, config).await;
return;
}
// Otherwise, send immediately (legacy fire-and-forget behavior)
send_notifications_immediate(build, project, commit_hash, config).await;
}
/// Enqueue notification tasks for reliable delivery with retry
async fn enqueue_notifications(
pool: &PgPool,
build: &Build,
project: &Project,
commit_hash: &str,
config: &NotificationsConfig,
) {
let max_attempts = config.max_retry_attempts;
// 1. Generic webhook notification
if let Some(ref url) = config.webhook_url {
let payload = serde_json::json!({
"type": "webhook",
"url": url,
"build_id": build.id,
"build_status": build.status,
"build_job": build.job_name,
"build_drv": build.drv_path,
"build_output": build.build_output_path,
"project_name": project.name,
"project_url": project.repository_url,
"commit_hash": commit_hash,
});
if let Err(e) =
repo::notification_tasks::create(pool, "webhook", payload, max_attempts)
.await
{
error!(build_id = %build.id, "Failed to enqueue webhook notification: {e}");
}
}
// 2. GitHub commit status
if let Some(ref token) = config.github_token
&& project.repository_url.contains("github.com")
{
let payload = serde_json::json!({
"type": "github_status",
"token": token,
"repository_url": project.repository_url,
"commit_hash": commit_hash,
"build_id": build.id,
"build_status": build.status,
"build_job": build.job_name,
});
if let Err(e) = repo::notification_tasks::create(
pool,
"github_status",
payload,
max_attempts,
)
.await
{
error!(build_id = %build.id, "Failed to enqueue GitHub status notification: {e}");
}
}
// 3. Gitea/Forgejo commit status
if let (Some(url), Some(token)) = (&config.gitea_url, &config.gitea_token) {
let payload = serde_json::json!({
"type": "gitea_status",
"base_url": url,
"token": token,
"repository_url": project.repository_url,
"commit_hash": commit_hash,
"build_id": build.id,
"build_status": build.status,
"build_job": build.job_name,
});
if let Err(e) = repo::notification_tasks::create(
pool,
"gitea_status",
payload,
max_attempts,
)
.await
{
error!(build_id = %build.id, "Failed to enqueue Gitea status notification: {e}");
}
}
// 4. GitLab commit status
if let (Some(url), Some(token)) = (&config.gitlab_url, &config.gitlab_token) {
let payload = serde_json::json!({
"type": "gitlab_status",
"base_url": url,
"token": token,
"repository_url": project.repository_url,
"commit_hash": commit_hash,
"build_id": build.id,
"build_status": build.status,
"build_job": build.job_name,
});
if let Err(e) = repo::notification_tasks::create(
pool,
"gitlab_status",
payload,
max_attempts,
)
.await
{
error!(build_id = %build.id, "Failed to enqueue GitLab status notification: {e}");
}
}
// 5. Email notification
let is_failure = !build.status.is_success();
if let Some(ref email_config) = config.email
&& (!email_config.on_failure_only || is_failure)
{
let payload = serde_json::json!({
"type": "email",
"config": email_config,
"build_id": build.id,
"build_status": build.status,
"build_job": build.job_name,
"build_drv": build.drv_path,
"build_output": build.build_output_path,
"project_name": project.name,
});
if let Err(e) =
repo::notification_tasks::create(pool, "email", payload, max_attempts)
.await
{
error!(build_id = %build.id, "Failed to enqueue email notification: {e}");
}
}
}
/// Send notifications immediately (legacy fire-and-forget behavior)
async fn send_notifications_immediate(
build: &Build,
project: &Project,
commit_hash: &str,
config: &NotificationsConfig,
) {
// 1. Generic webhook notification
if let Some(ref url) = config.webhook_url {
webhook_notification(url, build, project, commit_hash).await;
} }
// 2. GitHub commit status // 2. GitHub commit status
@ -56,7 +213,12 @@ pub async fn dispatch_build_finished(
} }
} }
async fn run_command_notification(cmd: &str, build: &Build, project: &Project) { async fn webhook_notification(
url: &str,
build: &Build,
project: &Project,
commit_hash: &str,
) {
let status_str = match build.status { let status_str = match build.status {
BuildStatus::Succeeded | BuildStatus::CachedFailure => "success", BuildStatus::Succeeded | BuildStatus::CachedFailure => "success",
BuildStatus::Failed BuildStatus::Failed
@ -72,32 +234,29 @@ async fn run_command_notification(cmd: &str, build: &Build, project: &Project) {
BuildStatus::Pending | BuildStatus::Running => "pending", BuildStatus::Pending | BuildStatus::Running => "pending",
}; };
let result = tokio::process::Command::new("sh") let payload = serde_json::json!({
.arg("-c") "build_id": build.id,
.arg(cmd) "build_status": status_str,
.env("FC_BUILD_ID", build.id.to_string()) "build_job": build.job_name,
.env("FC_BUILD_STATUS", status_str) "build_drv": build.drv_path,
.env("FC_BUILD_JOB", &build.job_name) "build_output": build.build_output_path.as_deref().unwrap_or(""),
.env("FC_BUILD_DRV", &build.drv_path) "project_name": project.name,
.env("FC_PROJECT_NAME", &project.name) "project_url": project.repository_url,
.env("FC_PROJECT_URL", &project.repository_url) "commit_hash": commit_hash,
.env( });
"FC_BUILD_OUTPUT",
build.build_output_path.as_deref().unwrap_or(""),
)
.output()
.await;
match result { match http_client().post(url).json(&payload).send().await {
Ok(output) => { Ok(resp) if resp.status().is_success() => {
if output.status.success() { info!(build_id = %build.id, "Webhook notification sent");
info!(build_id = %build.id, "RunCommand completed successfully");
} else {
let stderr = String::from_utf8_lossy(&output.stderr);
warn!(build_id = %build.id, "RunCommand failed: {stderr}");
}
}, },
Err(e) => error!(build_id = %build.id, "RunCommand execution failed: {e}"), Ok(resp) => {
warn!(
build_id = %build.id,
status = %resp.status(),
"Webhook notification rejected"
);
},
Err(e) => error!(build_id = %build.id, "Webhook notification failed: {e}"),
} }
} }
@ -108,9 +267,7 @@ async fn set_github_status(
build: &Build, build: &Build,
) { ) {
// Parse owner/repo from URL // Parse owner/repo from URL
let (owner, repo) = if let Some(v) = parse_github_repo(repo_url) { let Some((owner, repo)) = parse_github_repo(repo_url) else {
v
} else {
warn!("Cannot parse GitHub owner/repo from {repo_url}"); warn!("Cannot parse GitHub owner/repo from {repo_url}");
return; return;
}; };
@ -171,9 +328,7 @@ async fn set_gitea_status(
build: &Build, build: &Build,
) { ) {
// Parse owner/repo from URL (try to extract from the gitea URL) // Parse owner/repo from URL (try to extract from the gitea URL)
let (owner, repo) = if let Some(v) = parse_gitea_repo(repo_url, base_url) { let Some((owner, repo)) = parse_gitea_repo(repo_url, base_url) else {
v
} else {
warn!("Cannot parse Gitea owner/repo from {repo_url}"); warn!("Cannot parse Gitea owner/repo from {repo_url}");
return; return;
}; };
@ -231,9 +386,7 @@ async fn set_gitlab_status(
build: &Build, build: &Build,
) { ) {
// Parse project path from URL // Parse project path from URL
let project_path = if let Some(p) = parse_gitlab_project(repo_url, base_url) { let Some(project_path) = parse_gitlab_project(repo_url, base_url) else {
p
} else {
warn!("Cannot parse GitLab project from {repo_url}"); warn!("Cannot parse GitLab project from {repo_url}");
return; return;
}; };
@ -446,6 +599,320 @@ async fn send_email_notification(
} }
} }
/// Process a notification task from the retry queue
///
/// # Errors
///
/// Returns error if notification delivery fails.
pub async fn process_notification_task(
task: &crate::models::NotificationTask,
) -> Result<(), String> {
let task_type = task.notification_type.as_str();
let payload = &task.payload;
match task_type {
"webhook" => {
let url = payload["url"]
.as_str()
.ok_or("Missing url in webhook payload")?;
let status_str = match payload["build_status"].as_str() {
Some("succeeded" | "cached_failure") => "success",
Some("failed") => "failure",
Some("cancelled") => "cancelled",
Some("aborted") => "aborted",
Some("unsupported_system") => "skipped",
_ => "pending",
};
let body = serde_json::json!({
"build_id": payload["build_id"],
"build_status": status_str,
"build_job": payload["build_job"],
"build_drv": payload["build_drv"],
"build_output": payload["build_output"],
"project_name": payload["project_name"],
"project_url": payload["project_url"],
"commit_hash": payload["commit_hash"],
});
let resp = http_client()
.post(url)
.json(&body)
.send()
.await
.map_err(|e| format!("HTTP request failed: {e}"))?;
if !resp.status().is_success() {
return Err(format!("Webhook returned status: {}", resp.status()));
}
Ok(())
},
"github_status" => {
let token = payload["token"]
.as_str()
.ok_or("Missing token in github_status payload")?;
let repo_url = payload["repository_url"]
.as_str()
.ok_or("Missing repository_url")?;
let commit = payload["commit_hash"]
.as_str()
.ok_or("Missing commit_hash")?;
let job_name =
payload["build_job"].as_str().ok_or("Missing build_job")?;
let (owner, repo) = parse_github_repo(repo_url)
.ok_or_else(|| format!("Cannot parse GitHub repo from {repo_url}"))?;
let (state, description) = match payload["build_status"].as_str() {
Some("succeeded" | "cached_failure") => ("success", "Build succeeded"),
Some("failed") => ("failure", "Build failed"),
Some("running") => ("pending", "Build in progress"),
Some("cancelled") => ("error", "Build cancelled"),
_ => ("pending", "Build queued"),
};
let url = format!(
"https://api.github.com/repos/{owner}/{repo}/statuses/{commit}"
);
let body = serde_json::json!({
"state": state,
"description": description,
"context": format!("fc/{job_name}"),
});
let resp = http_client()
.post(&url)
.header("Authorization", format!("token {token}"))
.header("User-Agent", "fc-ci")
.header("Accept", "application/vnd.github+json")
.json(&body)
.send()
.await
.map_err(|e| format!("GitHub API request failed: {e}"))?;
if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
return Err(format!("GitHub API returned {status}: {text}"));
}
Ok(())
},
"gitea_status" => {
let base_url = payload["base_url"]
.as_str()
.ok_or("Missing base_url in gitea_status payload")?;
let token = payload["token"].as_str().ok_or("Missing token")?;
let repo_url = payload["repository_url"]
.as_str()
.ok_or("Missing repository_url")?;
let commit = payload["commit_hash"]
.as_str()
.ok_or("Missing commit_hash")?;
let job_name =
payload["build_job"].as_str().ok_or("Missing build_job")?;
let (owner, repo) = parse_gitea_repo(repo_url, base_url)
.ok_or_else(|| format!("Cannot parse Gitea repo from {repo_url}"))?;
let (state, description) = match payload["build_status"].as_str() {
Some("succeeded" | "cached_failure") => ("success", "Build succeeded"),
Some("failed") => ("failure", "Build failed"),
Some("running") => ("pending", "Build in progress"),
Some("cancelled") => ("error", "Build cancelled"),
_ => ("pending", "Build queued"),
};
let url =
format!("{base_url}/api/v1/repos/{owner}/{repo}/statuses/{commit}");
let body = serde_json::json!({
"state": state,
"description": description,
"context": format!("fc/{job_name}"),
});
let resp = http_client()
.post(&url)
.header("Authorization", format!("token {token}"))
.json(&body)
.send()
.await
.map_err(|e| format!("Gitea API request failed: {e}"))?;
if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
return Err(format!("Gitea API returned {status}: {text}"));
}
Ok(())
},
"gitlab_status" => {
let base_url = payload["base_url"]
.as_str()
.ok_or("Missing base_url in gitlab_status payload")?;
let token = payload["token"].as_str().ok_or("Missing token")?;
let repo_url = payload["repository_url"]
.as_str()
.ok_or("Missing repository_url")?;
let commit = payload["commit_hash"]
.as_str()
.ok_or("Missing commit_hash")?;
let job_name =
payload["build_job"].as_str().ok_or("Missing build_job")?;
let project_path =
parse_gitlab_project(repo_url, base_url).ok_or_else(|| {
format!("Cannot parse GitLab project from {repo_url}")
})?;
let (state, description) = match payload["build_status"].as_str() {
Some("succeeded" | "cached_failure") => ("success", "Build succeeded"),
Some("failed") => ("failed", "Build failed"),
Some("running") => ("running", "Build in progress"),
Some("cancelled") => ("canceled", "Build cancelled"),
_ => ("pending", "Build queued"),
};
let encoded_project = urlencoding::encode(&project_path);
let url = format!(
"{}/api/v4/projects/{}/statuses/{}",
base_url.trim_end_matches('/'),
encoded_project,
commit
);
let body = serde_json::json!({
"state": state,
"description": description,
"name": format!("fc/{job_name}"),
});
let resp = http_client()
.post(&url)
.header("PRIVATE-TOKEN", token)
.json(&body)
.send()
.await
.map_err(|e| format!("GitLab API request failed: {e}"))?;
if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
return Err(format!("GitLab API returned {status}: {text}"));
}
Ok(())
},
"email" => {
use lettre::{
AsyncSmtpTransport,
AsyncTransport,
Message,
Tokio1Executor,
transport::smtp::authentication::Credentials,
};
// Email sending is complex, so we'll reuse the existing function
// by deserializing the config from payload
let email_config: EmailConfig =
serde_json::from_value(payload["config"].clone())
.map_err(|e| format!("Failed to deserialize email config: {e}"))?;
// Create a minimal Build struct from payload
let build_id = payload["build_id"]
.as_str()
.and_then(|s| uuid::Uuid::parse_str(s).ok())
.ok_or("Invalid build_id")?;
let job_name = payload["build_job"]
.as_str()
.ok_or("Missing build_job")?
.to_string();
let drv_path = payload["build_drv"]
.as_str()
.ok_or("Missing build_drv")?
.to_string();
let build_output_path =
payload["build_output"].as_str().map(String::from);
let status_str = payload["build_status"]
.as_str()
.ok_or("Missing build_status")?;
let status = match status_str {
"succeeded" => BuildStatus::Succeeded,
_ => BuildStatus::Failed,
};
let project_name = payload["project_name"]
.as_str()
.ok_or("Missing project_name")?;
let status_display = match status {
BuildStatus::Succeeded => "SUCCESS",
_ => "FAILURE",
};
let subject =
format!("[FC] {status_display} - {job_name} ({project_name})");
let body = format!(
"Build notification from FC CI\n\nProject: {}\nJob: {}\nStatus: \
{}\nDerivation: {}\nOutput: {}\nBuild ID: {}\n",
project_name,
job_name,
status_display,
drv_path,
build_output_path.as_deref().unwrap_or("N/A"),
build_id,
);
for to_addr in &email_config.to_addresses {
let email = Message::builder()
.from(
email_config
.from_address
.parse()
.map_err(|e| format!("Invalid from address: {e}"))?,
)
.to(
to_addr
.parse()
.map_err(|e| format!("Invalid to address: {e}"))?,
)
.subject(&subject)
.body(body.clone())
.map_err(|e| format!("Failed to build email: {e}"))?;
let mut mailer_builder = if email_config.tls {
AsyncSmtpTransport::<Tokio1Executor>::relay(&email_config.smtp_host)
.map_err(|e| format!("Failed to create SMTP transport: {e}"))?
} else {
AsyncSmtpTransport::<Tokio1Executor>::builder_dangerous(
&email_config.smtp_host,
)
}
.port(email_config.smtp_port);
if let (Some(user), Some(pass)) =
(&email_config.smtp_user, &email_config.smtp_password)
{
mailer_builder = mailer_builder
.credentials(Credentials::new(user.clone(), pass.clone()));
}
let mailer = mailer_builder.build();
mailer
.send(email)
.await
.map_err(|e| format!("Failed to send email: {e}"))?;
}
Ok(())
},
_ => Err(format!("Unknown notification type: {task_type}")),
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View file

@ -6,6 +6,11 @@ use crate::{
models::ApiKey, models::ApiKey,
}; };
/// Create a new API key.
///
/// # Errors
///
/// Returns error if database insert fails or key already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
name: &str, name: &str,
@ -31,6 +36,11 @@ pub async fn create(
}) })
} }
/// Insert or update an API key by hash.
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
name: &str, name: &str,
@ -50,6 +60,11 @@ pub async fn upsert(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Find an API key by its hash.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_hash( pub async fn get_by_hash(
pool: &PgPool, pool: &PgPool,
key_hash: &str, key_hash: &str,
@ -61,6 +76,11 @@ pub async fn get_by_hash(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// List all API keys.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list(pool: &PgPool) -> Result<Vec<ApiKey>> { pub async fn list(pool: &PgPool) -> Result<Vec<ApiKey>> {
sqlx::query_as::<_, ApiKey>("SELECT * FROM api_keys ORDER BY created_at DESC") sqlx::query_as::<_, ApiKey>("SELECT * FROM api_keys ORDER BY created_at DESC")
.fetch_all(pool) .fetch_all(pool)
@ -68,6 +88,11 @@ pub async fn list(pool: &PgPool) -> Result<Vec<ApiKey>> {
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Delete an API key by ID.
///
/// # Errors
///
/// Returns error if database delete fails or key not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM api_keys WHERE id = $1") let result = sqlx::query("DELETE FROM api_keys WHERE id = $1")
.bind(id) .bind(id)
@ -79,6 +104,11 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
Ok(()) Ok(())
} }
/// Update the `last_used_at` timestamp for an API key.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn touch_last_used(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn touch_last_used(pool: &PgPool, id: Uuid) -> Result<()> {
sqlx::query("UPDATE api_keys SET last_used_at = NOW() WHERE id = $1") sqlx::query("UPDATE api_keys SET last_used_at = NOW() WHERE id = $1")
.bind(id) .bind(id)

View file

@ -6,6 +6,11 @@ use crate::{
models::BuildDependency, models::BuildDependency,
}; };
/// Create a build dependency relationship.
///
/// # Errors
///
/// Returns error if database insert fails or dependency already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
build_id: Uuid, build_id: Uuid,
@ -31,6 +36,11 @@ pub async fn create(
}) })
} }
/// List all dependencies for a build.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_build( pub async fn list_for_build(
pool: &PgPool, pool: &PgPool,
build_id: Uuid, build_id: Uuid,
@ -46,6 +56,10 @@ pub async fn list_for_build(
/// Batch check if all dependency builds are completed for multiple builds at /// Batch check if all dependency builds are completed for multiple builds at
/// once. Returns a map from `build_id` to whether all deps are completed. /// once. Returns a map from `build_id` to whether all deps are completed.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn check_deps_for_builds( pub async fn check_deps_for_builds(
pool: &PgPool, pool: &PgPool,
build_ids: &[Uuid], build_ids: &[Uuid],
@ -77,6 +91,10 @@ pub async fn check_deps_for_builds(
} }
/// Check if all dependency builds for a given build are completed. /// Check if all dependency builds for a given build are completed.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn all_deps_completed(pool: &PgPool, build_id: Uuid) -> Result<bool> { pub async fn all_deps_completed(pool: &PgPool, build_id: Uuid) -> Result<bool> {
let row: (i64,) = sqlx::query_as( let row: (i64,) = sqlx::query_as(
"SELECT COUNT(*) FROM build_dependencies bd JOIN builds b ON \ "SELECT COUNT(*) FROM build_dependencies bd JOIN builds b ON \

View file

@ -7,6 +7,8 @@ use crate::{
models::BuildMetric, models::BuildMetric,
}; };
type PercentileRow = (DateTime<Utc>, Option<f64>, Option<f64>, Option<f64>);
/// Time-series data point for metrics visualization. /// Time-series data point for metrics visualization.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct TimeseriesPoint { pub struct TimeseriesPoint {
@ -32,6 +34,11 @@ pub struct DurationPercentiles {
pub p99: Option<f64>, pub p99: Option<f64>,
} }
/// Insert or update a build metric.
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
build_id: Uuid, build_id: Uuid,
@ -54,6 +61,11 @@ pub async fn upsert(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Calculate build failure rate over a time window.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn calculate_failure_rate( pub async fn calculate_failure_rate(
pool: &PgPool, pool: &PgPool,
project_id: Option<Uuid>, project_id: Option<Uuid>,
@ -87,6 +99,10 @@ pub async fn calculate_failure_rate(
/// Get build success/failure counts over time. /// Get build success/failure counts over time.
/// Buckets builds by time interval for charting. /// Buckets builds by time interval for charting.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_build_stats_timeseries( pub async fn get_build_stats_timeseries(
pool: &PgPool, pool: &PgPool,
project_id: Option<Uuid>, project_id: Option<Uuid>,
@ -136,6 +152,10 @@ pub async fn get_build_stats_timeseries(
} }
/// Get build duration percentiles over time. /// Get build duration percentiles over time.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_duration_percentiles_timeseries( pub async fn get_duration_percentiles_timeseries(
pool: &PgPool, pool: &PgPool,
project_id: Option<Uuid>, project_id: Option<Uuid>,
@ -143,8 +163,7 @@ pub async fn get_duration_percentiles_timeseries(
hours: i32, hours: i32,
bucket_minutes: i32, bucket_minutes: i32,
) -> Result<Vec<DurationPercentiles>> { ) -> Result<Vec<DurationPercentiles>> {
let rows: Vec<(DateTime<Utc>, Option<f64>, Option<f64>, Option<f64>)> = let rows: Vec<PercentileRow> = sqlx::query_as(
sqlx::query_as(
"SELECT "SELECT
date_trunc('minute', b.completed_at) + date_trunc('minute', b.completed_at) +
(EXTRACT(MINUTE FROM b.completed_at)::int / $4) * INTERVAL '1 minute' \ (EXTRACT(MINUTE FROM b.completed_at)::int / $4) * INTERVAL '1 minute' \
@ -190,6 +209,10 @@ pub async fn get_duration_percentiles_timeseries(
} }
/// Get queue depth over time. /// Get queue depth over time.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_queue_depth_timeseries( pub async fn get_queue_depth_timeseries(
pool: &PgPool, pool: &PgPool,
hours: i32, hours: i32,
@ -228,6 +251,10 @@ pub async fn get_queue_depth_timeseries(
} }
/// Get per-system build distribution. /// Get per-system build distribution.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_system_distribution( pub async fn get_system_distribution(
pool: &PgPool, pool: &PgPool,
project_id: Option<Uuid>, project_id: Option<Uuid>,

View file

@ -6,6 +6,11 @@ use crate::{
models::{BuildProduct, CreateBuildProduct}, models::{BuildProduct, CreateBuildProduct},
}; };
/// Create a build product record.
///
/// # Errors
///
/// Returns error if database insert fails.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
input: CreateBuildProduct, input: CreateBuildProduct,
@ -27,6 +32,11 @@ pub async fn create(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Get a build product by ID.
///
/// # Errors
///
/// Returns error if database query fails or product not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<BuildProduct> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<BuildProduct> {
sqlx::query_as::<_, BuildProduct>( sqlx::query_as::<_, BuildProduct>(
"SELECT * FROM build_products WHERE id = $1", "SELECT * FROM build_products WHERE id = $1",
@ -37,6 +47,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<BuildProduct> {
.ok_or_else(|| CiError::NotFound(format!("Build product {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Build product {id} not found")))
} }
/// List all build products for a build.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_build( pub async fn list_for_build(
pool: &PgPool, pool: &PgPool,
build_id: Uuid, build_id: Uuid,

View file

@ -6,6 +6,11 @@ use crate::{
models::{BuildStep, CreateBuildStep}, models::{BuildStep, CreateBuildStep},
}; };
/// Create a build step record.
///
/// # Errors
///
/// Returns error if database insert fails or step already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
input: CreateBuildStep, input: CreateBuildStep,
@ -32,6 +37,11 @@ pub async fn create(
}) })
} }
/// Mark a build step as completed.
///
/// # Errors
///
/// Returns error if database update fails or step not found.
pub async fn complete( pub async fn complete(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -52,6 +62,11 @@ pub async fn complete(
.ok_or_else(|| CiError::NotFound(format!("Build step {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Build step {id} not found")))
} }
/// List all build steps for a build.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_build( pub async fn list_for_build(
pool: &PgPool, pool: &PgPool,
build_id: Uuid, build_id: Uuid,

View file

@ -6,6 +6,11 @@ use crate::{
models::{Build, BuildStats, BuildStatus, CreateBuild}, models::{Build, BuildStats, BuildStatus, CreateBuild},
}; };
/// Create a new build record in pending state.
///
/// # Errors
///
/// Returns error if database insert fails or job already exists.
pub async fn create(pool: &PgPool, input: CreateBuild) -> Result<Build> { pub async fn create(pool: &PgPool, input: CreateBuild) -> Result<Build> {
let is_aggregate = input.is_aggregate.unwrap_or(false); let is_aggregate = input.is_aggregate.unwrap_or(false);
sqlx::query_as::<_, Build>( sqlx::query_as::<_, Build>(
@ -35,6 +40,11 @@ pub async fn create(pool: &PgPool, input: CreateBuild) -> Result<Build> {
}) })
} }
/// Find a succeeded build by derivation path (for build result caching).
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_completed_by_drv_path( pub async fn get_completed_by_drv_path(
pool: &PgPool, pool: &PgPool,
drv_path: &str, drv_path: &str,
@ -48,6 +58,11 @@ pub async fn get_completed_by_drv_path(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Get a build by ID.
///
/// # Errors
///
/// Returns error if database query fails or build not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Build> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<Build> {
sqlx::query_as::<_, Build>("SELECT * FROM builds WHERE id = $1") sqlx::query_as::<_, Build>("SELECT * FROM builds WHERE id = $1")
.bind(id) .bind(id)
@ -56,6 +71,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<Build> {
.ok_or_else(|| CiError::NotFound(format!("Build {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Build {id} not found")))
} }
/// List all builds for a given evaluation.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_evaluation( pub async fn list_for_evaluation(
pool: &PgPool, pool: &PgPool,
evaluation_id: Uuid, evaluation_id: Uuid,
@ -69,6 +89,12 @@ pub async fn list_for_evaluation(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// List pending builds, prioritizing non-aggregate jobs.
/// Returns up to `limit * worker_count` builds.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_pending( pub async fn list_pending(
pool: &PgPool, pool: &PgPool,
limit: i64, limit: i64,
@ -99,6 +125,10 @@ pub async fn list_pending(
/// Atomically claim a pending build by setting it to running. /// Atomically claim a pending build by setting it to running.
/// Returns `None` if the build was already claimed by another worker. /// Returns `None` if the build was already claimed by another worker.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn start(pool: &PgPool, id: Uuid) -> Result<Option<Build>> { pub async fn start(pool: &PgPool, id: Uuid) -> Result<Option<Build>> {
sqlx::query_as::<_, Build>( sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'running', started_at = NOW() WHERE id = $1 \ "UPDATE builds SET status = 'running', started_at = NOW() WHERE id = $1 \
@ -110,6 +140,11 @@ pub async fn start(pool: &PgPool, id: Uuid) -> Result<Option<Build>> {
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Mark a build as completed with final status and outputs.
///
/// # Errors
///
/// Returns error if database update fails or build not found.
pub async fn complete( pub async fn complete(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -132,6 +167,11 @@ pub async fn complete(
.ok_or_else(|| CiError::NotFound(format!("Build {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Build {id} not found")))
} }
/// List recent builds ordered by creation time.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_recent(pool: &PgPool, limit: i64) -> Result<Vec<Build>> { pub async fn list_recent(pool: &PgPool, limit: i64) -> Result<Vec<Build>> {
sqlx::query_as::<_, Build>( sqlx::query_as::<_, Build>(
"SELECT * FROM builds ORDER BY created_at DESC LIMIT $1", "SELECT * FROM builds ORDER BY created_at DESC LIMIT $1",
@ -142,6 +182,11 @@ pub async fn list_recent(pool: &PgPool, limit: i64) -> Result<Vec<Build>> {
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// List all builds for a project.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_project( pub async fn list_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -157,6 +202,11 @@ pub async fn list_for_project(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Get aggregate build statistics.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_stats(pool: &PgPool) -> Result<BuildStats> { pub async fn get_stats(pool: &PgPool) -> Result<BuildStats> {
match sqlx::query_as::<_, BuildStats>("SELECT * FROM build_stats") match sqlx::query_as::<_, BuildStats>("SELECT * FROM build_stats")
.fetch_optional(pool) .fetch_optional(pool)
@ -178,6 +228,10 @@ pub async fn get_stats(pool: &PgPool) -> Result<BuildStats> {
/// Reset builds that were left in 'running' state (orphaned by a crashed /// Reset builds that were left in 'running' state (orphaned by a crashed
/// runner). Limited to 50 builds per call to prevent thundering herd. /// runner). Limited to 50 builds per call to prevent thundering herd.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn reset_orphaned( pub async fn reset_orphaned(
pool: &PgPool, pool: &PgPool,
older_than_secs: i64, older_than_secs: i64,
@ -197,6 +251,10 @@ pub async fn reset_orphaned(
/// List builds with optional `evaluation_id`, status, system, and `job_name` /// List builds with optional `evaluation_id`, status, system, and `job_name`
/// filters, with pagination. /// filters, with pagination.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_filtered( pub async fn list_filtered(
pool: &PgPool, pool: &PgPool,
evaluation_id: Option<Uuid>, evaluation_id: Option<Uuid>,
@ -223,6 +281,11 @@ pub async fn list_filtered(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Count builds matching filter criteria.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count_filtered( pub async fn count_filtered(
pool: &PgPool, pool: &PgPool,
evaluation_id: Option<Uuid>, evaluation_id: Option<Uuid>,
@ -247,6 +310,10 @@ pub async fn count_filtered(
/// Return the subset of the given build IDs whose status is 'cancelled'. /// Return the subset of the given build IDs whose status is 'cancelled'.
/// Used by the cancel-checker loop to detect builds cancelled while running. /// Used by the cancel-checker loop to detect builds cancelled while running.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_cancelled_among( pub async fn get_cancelled_among(
pool: &PgPool, pool: &PgPool,
build_ids: &[Uuid], build_ids: &[Uuid],
@ -265,6 +332,11 @@ pub async fn get_cancelled_among(
Ok(rows.into_iter().map(|(id,)| id).collect()) Ok(rows.into_iter().map(|(id,)| id).collect())
} }
/// Cancel a build.
///
/// # Errors
///
/// Returns error if database update fails or build not in cancellable state.
pub async fn cancel(pool: &PgPool, id: Uuid) -> Result<Build> { pub async fn cancel(pool: &PgPool, id: Uuid) -> Result<Build> {
sqlx::query_as::<_, Build>( sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'cancelled', completed_at = NOW() WHERE id = \ "UPDATE builds SET status = 'cancelled', completed_at = NOW() WHERE id = \
@ -281,6 +353,10 @@ pub async fn cancel(pool: &PgPool, id: Uuid) -> Result<Build> {
} }
/// Cancel a build and all its transitive dependents. /// Cancel a build and all its transitive dependents.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn cancel_cascade(pool: &PgPool, id: Uuid) -> Result<Vec<Build>> { pub async fn cancel_cascade(pool: &PgPool, id: Uuid) -> Result<Vec<Build>> {
let mut cancelled = Vec::new(); let mut cancelled = Vec::new();
@ -312,7 +388,11 @@ pub async fn cancel_cascade(pool: &PgPool, id: Uuid) -> Result<Vec<Build>> {
} }
/// Restart a build by resetting it to pending state. /// Restart a build by resetting it to pending state.
/// Only works for failed, succeeded, cancelled, or cached_failure builds. /// Only works for failed, succeeded, cancelled, or `cached_failure` builds.
///
/// # Errors
///
/// Returns error if database update fails or build not in restartable state.
pub async fn restart(pool: &PgPool, id: Uuid) -> Result<Build> { pub async fn restart(pool: &PgPool, id: Uuid) -> Result<Build> {
let build = sqlx::query_as::<_, Build>( let build = sqlx::query_as::<_, Build>(
"UPDATE builds SET status = 'pending', started_at = NULL, completed_at = \ "UPDATE builds SET status = 'pending', started_at = NULL, completed_at = \
@ -339,6 +419,10 @@ pub async fn restart(pool: &PgPool, id: Uuid) -> Result<Build> {
} }
/// Mark a build's outputs as signed. /// Mark a build's outputs as signed.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn mark_signed(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn mark_signed(pool: &PgPool, id: Uuid) -> Result<()> {
sqlx::query("UPDATE builds SET signed = true WHERE id = $1") sqlx::query("UPDATE builds SET signed = true WHERE id = $1")
.bind(id) .bind(id)
@ -350,6 +434,10 @@ pub async fn mark_signed(pool: &PgPool, id: Uuid) -> Result<()> {
/// Batch-fetch completed builds by derivation paths. /// Batch-fetch completed builds by derivation paths.
/// Returns a map from `drv_path` to Build for deduplication. /// Returns a map from `drv_path` to Build for deduplication.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_completed_by_drv_paths( pub async fn get_completed_by_drv_paths(
pool: &PgPool, pool: &PgPool,
drv_paths: &[String], drv_paths: &[String],
@ -375,6 +463,10 @@ pub async fn get_completed_by_drv_paths(
} }
/// Return the set of build IDs that have `keep = true` (GC-pinned). /// Return the set of build IDs that have `keep = true` (GC-pinned).
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_pinned_ids( pub async fn list_pinned_ids(
pool: &PgPool, pool: &PgPool,
) -> Result<std::collections::HashSet<Uuid>> { ) -> Result<std::collections::HashSet<Uuid>> {
@ -387,6 +479,10 @@ pub async fn list_pinned_ids(
} }
/// Set the `keep` (GC pin) flag on a build. /// Set the `keep` (GC pin) flag on a build.
///
/// # Errors
///
/// Returns error if database update fails or build not found.
pub async fn set_keep(pool: &PgPool, id: Uuid, keep: bool) -> Result<Build> { pub async fn set_keep(pool: &PgPool, id: Uuid, keep: bool) -> Result<Build> {
sqlx::query_as::<_, Build>( sqlx::query_as::<_, Build>(
"UPDATE builds SET keep = $1 WHERE id = $2 RETURNING *", "UPDATE builds SET keep = $1 WHERE id = $2 RETURNING *",
@ -399,6 +495,10 @@ pub async fn set_keep(pool: &PgPool, id: Uuid, keep: bool) -> Result<Build> {
} }
/// Set the `builder_id` for a build. /// Set the `builder_id` for a build.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn set_builder( pub async fn set_builder(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,

View file

@ -7,6 +7,11 @@ use crate::{
models::{Channel, CreateChannel}, models::{Channel, CreateChannel},
}; };
/// Create a release channel.
///
/// # Errors
///
/// Returns error if database insert fails or channel already exists.
pub async fn create(pool: &PgPool, input: CreateChannel) -> Result<Channel> { pub async fn create(pool: &PgPool, input: CreateChannel) -> Result<Channel> {
sqlx::query_as::<_, Channel>( sqlx::query_as::<_, Channel>(
"INSERT INTO channels (project_id, name, jobset_id) VALUES ($1, $2, $3) \ "INSERT INTO channels (project_id, name, jobset_id) VALUES ($1, $2, $3) \
@ -30,6 +35,11 @@ pub async fn create(pool: &PgPool, input: CreateChannel) -> Result<Channel> {
}) })
} }
/// Get a channel by ID.
///
/// # Errors
///
/// Returns error if database query fails or channel not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Channel> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<Channel> {
sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE id = $1") sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE id = $1")
.bind(id) .bind(id)
@ -38,6 +48,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<Channel> {
.ok_or_else(|| CiError::NotFound(format!("Channel {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Channel {id} not found")))
} }
/// List all channels for a project.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_project( pub async fn list_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -51,6 +66,11 @@ pub async fn list_for_project(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// List all channels.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_all(pool: &PgPool) -> Result<Vec<Channel>> { pub async fn list_all(pool: &PgPool) -> Result<Vec<Channel>> {
sqlx::query_as::<_, Channel>("SELECT * FROM channels ORDER BY name") sqlx::query_as::<_, Channel>("SELECT * FROM channels ORDER BY name")
.fetch_all(pool) .fetch_all(pool)
@ -59,6 +79,10 @@ pub async fn list_all(pool: &PgPool) -> Result<Vec<Channel>> {
} }
/// Promote an evaluation to a channel (set it as the current evaluation). /// Promote an evaluation to a channel (set it as the current evaluation).
///
/// # Errors
///
/// Returns error if database update fails or channel not found.
pub async fn promote( pub async fn promote(
pool: &PgPool, pool: &PgPool,
channel_id: Uuid, channel_id: Uuid,
@ -75,6 +99,11 @@ pub async fn promote(
.ok_or_else(|| CiError::NotFound(format!("Channel {channel_id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Channel {channel_id} not found")))
} }
/// Delete a channel.
///
/// # Errors
///
/// Returns error if database delete fails or channel not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM channels WHERE id = $1") let result = sqlx::query("DELETE FROM channels WHERE id = $1")
.bind(id) .bind(id)
@ -88,6 +117,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Upsert a channel (insert or update on conflict). /// Upsert a channel (insert or update on conflict).
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -109,6 +142,10 @@ pub async fn upsert(
/// Sync channels from declarative config. /// Sync channels from declarative config.
/// Deletes channels not in the declarative list and upserts those that are. /// Deletes channels not in the declarative list and upserts those that are.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn sync_for_project( pub async fn sync_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -146,6 +183,10 @@ pub async fn sync_for_project(
/// Find the channel for a jobset and auto-promote if all builds in the /// Find the channel for a jobset and auto-promote if all builds in the
/// evaluation succeeded. /// evaluation succeeded.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn auto_promote_if_complete( pub async fn auto_promote_if_complete(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -166,7 +207,7 @@ pub async fn auto_promote_if_complete(
return Ok(()); return Ok(());
} }
// All builds completed promote to any channels tracking this jobset // All builds completed, promote to any channels tracking this jobset
let channels = let channels =
sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE jobset_id = $1") sqlx::query_as::<_, Channel>("SELECT * FROM channels WHERE jobset_id = $1")
.bind(jobset_id) .bind(jobset_id)

View file

@ -6,6 +6,11 @@ use crate::{
models::{CreateEvaluation, Evaluation, EvaluationStatus}, models::{CreateEvaluation, Evaluation, EvaluationStatus},
}; };
/// Create a new evaluation in pending state.
///
/// # Errors
///
/// Returns error if database insert fails or evaluation already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
input: CreateEvaluation, input: CreateEvaluation,
@ -36,6 +41,11 @@ pub async fn create(
}) })
} }
/// Get an evaluation by ID.
///
/// # Errors
///
/// Returns error if database query fails or evaluation not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Evaluation> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<Evaluation> {
sqlx::query_as::<_, Evaluation>("SELECT * FROM evaluations WHERE id = $1") sqlx::query_as::<_, Evaluation>("SELECT * FROM evaluations WHERE id = $1")
.bind(id) .bind(id)
@ -44,6 +54,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<Evaluation> {
.ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found")))
} }
/// List all evaluations for a jobset.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_jobset( pub async fn list_for_jobset(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -60,6 +75,10 @@ pub async fn list_for_jobset(
/// List evaluations with optional `jobset_id` and status filters, with /// List evaluations with optional `jobset_id` and status filters, with
/// pagination. /// pagination.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_filtered( pub async fn list_filtered(
pool: &PgPool, pool: &PgPool,
jobset_id: Option<Uuid>, jobset_id: Option<Uuid>,
@ -81,6 +100,11 @@ pub async fn list_filtered(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Count evaluations matching filter criteria.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count_filtered( pub async fn count_filtered(
pool: &PgPool, pool: &PgPool,
jobset_id: Option<Uuid>, jobset_id: Option<Uuid>,
@ -98,6 +122,11 @@ pub async fn count_filtered(
Ok(row.0) Ok(row.0)
} }
/// Update evaluation status and optional error message.
///
/// # Errors
///
/// Returns error if database update fails or evaluation not found.
pub async fn update_status( pub async fn update_status(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -116,6 +145,11 @@ pub async fn update_status(
.ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Evaluation {id} not found")))
} }
/// Get the latest evaluation for a jobset.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_latest( pub async fn get_latest(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -131,6 +165,10 @@ pub async fn get_latest(
} }
/// Set the inputs hash for an evaluation (used for eval caching). /// Set the inputs hash for an evaluation (used for eval caching).
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn set_inputs_hash( pub async fn set_inputs_hash(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -147,6 +185,10 @@ pub async fn set_inputs_hash(
/// Check if an evaluation with the same `inputs_hash` already exists for this /// Check if an evaluation with the same `inputs_hash` already exists for this
/// jobset. /// jobset.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_inputs_hash( pub async fn get_by_inputs_hash(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -163,6 +205,11 @@ pub async fn get_by_inputs_hash(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Count total evaluations.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count(pool: &PgPool) -> Result<i64> { pub async fn count(pool: &PgPool) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM evaluations") let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM evaluations")
.fetch_one(pool) .fetch_one(pool)
@ -171,7 +218,11 @@ pub async fn count(pool: &PgPool) -> Result<i64> {
Ok(row.0) Ok(row.0)
} }
/// Get an evaluation by jobset_id and commit_hash. /// Get an evaluation by `jobset_id` and `commit_hash`.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_jobset_and_commit( pub async fn get_by_jobset_and_commit(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,

View file

@ -6,6 +6,11 @@ use crate::{
models::BuildStatus, models::BuildStatus,
}; };
/// Check if a derivation path is in the failed paths cache.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn is_cached_failure(pool: &PgPool, drv_path: &str) -> Result<bool> { pub async fn is_cached_failure(pool: &PgPool, drv_path: &str) -> Result<bool> {
let row: Option<(bool,)> = let row: Option<(bool,)> =
sqlx::query_as("SELECT true FROM failed_paths_cache WHERE drv_path = $1") sqlx::query_as("SELECT true FROM failed_paths_cache WHERE drv_path = $1")
@ -17,6 +22,11 @@ pub async fn is_cached_failure(pool: &PgPool, drv_path: &str) -> Result<bool> {
Ok(row.is_some()) Ok(row.is_some())
} }
/// Insert a failed derivation path into the cache.
///
/// # Errors
///
/// Returns error if database insert fails.
pub async fn insert( pub async fn insert(
pool: &PgPool, pool: &PgPool,
drv_path: &str, drv_path: &str,
@ -40,6 +50,11 @@ pub async fn insert(
Ok(()) Ok(())
} }
/// Remove a derivation path from the failed paths cache.
///
/// # Errors
///
/// Returns error if database delete fails.
pub async fn invalidate(pool: &PgPool, drv_path: &str) -> Result<()> { pub async fn invalidate(pool: &PgPool, drv_path: &str) -> Result<()> {
sqlx::query("DELETE FROM failed_paths_cache WHERE drv_path = $1") sqlx::query("DELETE FROM failed_paths_cache WHERE drv_path = $1")
.bind(drv_path) .bind(drv_path)
@ -50,6 +65,11 @@ pub async fn invalidate(pool: &PgPool, drv_path: &str) -> Result<()> {
Ok(()) Ok(())
} }
/// Remove expired entries from the failed paths cache.
///
/// # Errors
///
/// Returns error if database delete fails.
pub async fn cleanup_expired(pool: &PgPool, ttl_seconds: u64) -> Result<u64> { pub async fn cleanup_expired(pool: &PgPool, ttl_seconds: u64) -> Result<u64> {
let result = sqlx::query( let result = sqlx::query(
"DELETE FROM failed_paths_cache WHERE failed_at < NOW() - \ "DELETE FROM failed_paths_cache WHERE failed_at < NOW() - \

View file

@ -7,6 +7,11 @@ use crate::{
models::JobsetInput, models::JobsetInput,
}; };
/// Create a new jobset input.
///
/// # Errors
///
/// Returns error if database insert fails or input already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -38,6 +43,11 @@ pub async fn create(
}) })
} }
/// List all inputs for a jobset.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_jobset( pub async fn list_for_jobset(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -51,6 +61,11 @@ pub async fn list_for_jobset(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Delete a jobset input.
///
/// # Errors
///
/// Returns error if database delete fails or input not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM jobset_inputs WHERE id = $1") let result = sqlx::query("DELETE FROM jobset_inputs WHERE id = $1")
.bind(id) .bind(id)
@ -63,6 +78,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Upsert a jobset input (insert or update on conflict). /// Upsert a jobset input (insert or update on conflict).
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -89,6 +108,10 @@ pub async fn upsert(
/// Sync jobset inputs from declarative config. /// Sync jobset inputs from declarative config.
/// Deletes inputs not in the config and upserts those that are. /// Deletes inputs not in the config and upserts those that are.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn sync_for_jobset( pub async fn sync_for_jobset(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,

View file

@ -6,6 +6,11 @@ use crate::{
models::{ActiveJobset, CreateJobset, Jobset, JobsetState, UpdateJobset}, models::{ActiveJobset, CreateJobset, Jobset, JobsetState, UpdateJobset},
}; };
/// Create a new jobset with defaults applied.
///
/// # Errors
///
/// Returns error if database insert fails or jobset already exists.
pub async fn create(pool: &PgPool, input: CreateJobset) -> Result<Jobset> { pub async fn create(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
let state = input.state.unwrap_or(JobsetState::Enabled); let state = input.state.unwrap_or(JobsetState::Enabled);
// Sync enabled with state if state was explicitly set, otherwise use // Sync enabled with state if state was explicitly set, otherwise use
@ -50,6 +55,11 @@ pub async fn create(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
}) })
} }
/// Get a jobset by ID.
///
/// # Errors
///
/// Returns error if database query fails or jobset not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Jobset> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<Jobset> {
sqlx::query_as::<_, Jobset>("SELECT * FROM jobsets WHERE id = $1") sqlx::query_as::<_, Jobset>("SELECT * FROM jobsets WHERE id = $1")
.bind(id) .bind(id)
@ -58,6 +68,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<Jobset> {
.ok_or_else(|| CiError::NotFound(format!("Jobset {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Jobset {id} not found")))
} }
/// List all jobsets for a project.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_project( pub async fn list_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -76,6 +91,11 @@ pub async fn list_for_project(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Count jobsets for a project.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count_for_project(pool: &PgPool, project_id: Uuid) -> Result<i64> { pub async fn count_for_project(pool: &PgPool, project_id: Uuid) -> Result<i64> {
let row: (i64,) = let row: (i64,) =
sqlx::query_as("SELECT COUNT(*) FROM jobsets WHERE project_id = $1") sqlx::query_as("SELECT COUNT(*) FROM jobsets WHERE project_id = $1")
@ -86,6 +106,11 @@ pub async fn count_for_project(pool: &PgPool, project_id: Uuid) -> Result<i64> {
Ok(row.0) Ok(row.0)
} }
/// Update a jobset with partial fields.
///
/// # Errors
///
/// Returns error if database update fails or jobset not found.
pub async fn update( pub async fn update(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -139,6 +164,11 @@ pub async fn update(
}) })
} }
/// Delete a jobset.
///
/// # Errors
///
/// Returns error if database delete fails or jobset not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM jobsets WHERE id = $1") let result = sqlx::query("DELETE FROM jobsets WHERE id = $1")
.bind(id) .bind(id)
@ -152,6 +182,11 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
Ok(()) Ok(())
} }
/// Insert or update a jobset by name.
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert(pool: &PgPool, input: CreateJobset) -> Result<Jobset> { pub async fn upsert(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
let state = input.state.unwrap_or(JobsetState::Enabled); let state = input.state.unwrap_or(JobsetState::Enabled);
// Sync enabled with state if state was explicitly set, otherwise use // Sync enabled with state if state was explicitly set, otherwise use
@ -191,6 +226,11 @@ pub async fn upsert(pool: &PgPool, input: CreateJobset) -> Result<Jobset> {
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// List all active jobsets with project info.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_active(pool: &PgPool) -> Result<Vec<ActiveJobset>> { pub async fn list_active(pool: &PgPool) -> Result<Vec<ActiveJobset>> {
sqlx::query_as::<_, ActiveJobset>("SELECT * FROM active_jobsets") sqlx::query_as::<_, ActiveJobset>("SELECT * FROM active_jobsets")
.fetch_all(pool) .fetch_all(pool)
@ -199,6 +239,10 @@ pub async fn list_active(pool: &PgPool) -> Result<Vec<ActiveJobset>> {
} }
/// Mark a one-shot jobset as complete (set state to disabled). /// Mark a one-shot jobset as complete (set state to disabled).
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn mark_one_shot_complete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn mark_one_shot_complete(pool: &PgPool, id: Uuid) -> Result<()> {
sqlx::query( sqlx::query(
"UPDATE jobsets SET state = 'disabled', enabled = false WHERE id = $1 AND \ "UPDATE jobsets SET state = 'disabled', enabled = false WHERE id = $1 AND \
@ -212,6 +256,10 @@ pub async fn mark_one_shot_complete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Update the `last_checked_at` timestamp for a jobset. /// Update the `last_checked_at` timestamp for a jobset.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn update_last_checked(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn update_last_checked(pool: &PgPool, id: Uuid) -> Result<()> {
sqlx::query("UPDATE jobsets SET last_checked_at = NOW() WHERE id = $1") sqlx::query("UPDATE jobsets SET last_checked_at = NOW() WHERE id = $1")
.bind(id) .bind(id)
@ -222,6 +270,10 @@ pub async fn update_last_checked(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Check if a jobset has any running builds. /// Check if a jobset has any running builds.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn has_running_builds( pub async fn has_running_builds(
pool: &PgPool, pool: &PgPool,
jobset_id: Uuid, jobset_id: Uuid,
@ -240,6 +292,10 @@ pub async fn has_running_builds(
/// List jobsets that are due for evaluation based on their `check_interval`. /// List jobsets that are due for evaluation based on their `check_interval`.
/// Returns jobsets where `last_checked_at` is NULL or older than /// Returns jobsets where `last_checked_at` is NULL or older than
/// `check_interval` seconds. /// `check_interval` seconds.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_due_for_eval( pub async fn list_due_for_eval(
pool: &PgPool, pool: &PgPool,
limit: i64, limit: i64,

View file

@ -10,6 +10,7 @@ pub mod failed_paths_cache;
pub mod jobset_inputs; pub mod jobset_inputs;
pub mod jobsets; pub mod jobsets;
pub mod notification_configs; pub mod notification_configs;
pub mod notification_tasks;
pub mod project_members; pub mod project_members;
pub mod projects; pub mod projects;
pub mod remote_builders; pub mod remote_builders;

View file

@ -7,6 +7,11 @@ use crate::{
models::{CreateNotificationConfig, NotificationConfig}, models::{CreateNotificationConfig, NotificationConfig},
}; };
/// Create a new notification config.
///
/// # Errors
///
/// Returns error if database insert fails or config already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
input: CreateNotificationConfig, input: CreateNotificationConfig,
@ -33,6 +38,11 @@ pub async fn create(
}) })
} }
/// List all enabled notification configs for a project.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_project( pub async fn list_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -47,6 +57,11 @@ pub async fn list_for_project(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Delete a notification config.
///
/// # Errors
///
/// Returns error if database delete fails or config not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM notification_configs WHERE id = $1") let result = sqlx::query("DELETE FROM notification_configs WHERE id = $1")
.bind(id) .bind(id)
@ -61,6 +76,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Upsert a notification config (insert or update on conflict). /// Upsert a notification config (insert or update on conflict).
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -85,6 +104,10 @@ pub async fn upsert(
/// Sync notification configs from declarative config. /// Sync notification configs from declarative config.
/// Deletes configs not in the declarative list and upserts those that are. /// Deletes configs not in the declarative list and upserts those that are.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn sync_for_project( pub async fn sync_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,

View file

@ -0,0 +1,215 @@
//! Database operations for notification task retry queue
use sqlx::PgPool;
use uuid::Uuid;
use crate::{error::Result, models::NotificationTask};
/// Queue a notification of the given type for asynchronous delivery.
///
/// The task starts in the default (pending) state with the supplied
/// retry budget and is returned as persisted by the database.
///
/// # Errors
///
/// Returns error if database insert fails.
pub async fn create(
    pool: &PgPool,
    notification_type: &str,
    payload: serde_json::Value,
    max_attempts: i32,
) -> Result<NotificationTask> {
    Ok(sqlx::query_as::<_, NotificationTask>(
        r"
        INSERT INTO notification_tasks (notification_type, payload, max_attempts)
        VALUES ($1, $2, $3)
        RETURNING *
        ",
    )
    .bind(notification_type)
    .bind(payload)
    .bind(max_attempts)
    .fetch_one(pool)
    .await?)
}
/// Fetch pending tasks whose scheduled retry time has arrived.
///
/// Results are ordered by `next_retry_at` ascending (oldest due first)
/// and capped at `limit`.
///
/// NOTE(review): this query takes no row locks (`FOR UPDATE SKIP LOCKED`),
/// so two concurrent workers could fetch the same task — confirm a
/// single-worker deployment is assumed.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_pending(
    pool: &PgPool,
    limit: i32,
) -> Result<Vec<NotificationTask>> {
    Ok(sqlx::query_as::<_, NotificationTask>(
        r"
        SELECT *
        FROM notification_tasks
        WHERE status = 'pending'
          AND next_retry_at <= NOW()
        ORDER BY next_retry_at ASC
        LIMIT $1
        ",
    )
    .bind(limit)
    .fetch_all(pool)
    .await?)
}
/// Claim a task for a worker: flip it to 'running' and bump the attempt
/// counter in one statement.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn mark_running(pool: &PgPool, task_id: Uuid) -> Result<()> {
    let claim = sqlx::query(
        r"
        UPDATE notification_tasks
        SET status = 'running',
            attempts = attempts + 1
        WHERE id = $1
        ",
    )
    .bind(task_id);
    claim.execute(pool).await?;
    Ok(())
}
/// Record successful delivery: set status to 'completed' and stamp
/// `completed_at` with the database clock.
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn mark_completed(pool: &PgPool, task_id: Uuid) -> Result<()> {
    let done = sqlx::query(
        r"
        UPDATE notification_tasks
        SET status = 'completed',
            completed_at = NOW()
        WHERE id = $1
        ",
    )
    .bind(task_id);
    done.execute(pool).await?;
    Ok(())
}
/// Mark a task as failed and schedule retry with exponential backoff
/// Backoff formula: 1s, 2s, 4s, 8s, 16s...
///
/// `attempts` has already been incremented by `mark_running`, so after the
/// N-th failure the delay is `2^(N-1)` seconds (first failure: 1s).
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn mark_failed_and_retry(
    pool: &PgPool,
    task_id: Uuid,
    error: &str,
) -> Result<()> {
    // A single UPDATE decides between terminal failure and retry:
    // - attempts >= max_attempts: status becomes 'failed' permanently and
    //   completed_at is stamped so cleanup_old_tasks can reap the row.
    // - otherwise: status returns to 'pending' with next_retry_at pushed
    //   out by 2^(attempts - 1) seconds; completed_at is cleared.
    // next_retry_at is also set to NOW() on the terminal branch, which is
    // harmless since list_pending only selects status = 'pending'.
    sqlx::query(
        r"
        UPDATE notification_tasks
        SET status = CASE
            WHEN attempts >= max_attempts THEN 'failed'::varchar
            ELSE 'pending'::varchar
            END,
            last_error = $2,
            next_retry_at = CASE
            WHEN attempts >= max_attempts THEN NOW()
            ELSE NOW() + (POWER(2, attempts - 1) || ' seconds')::interval
            END,
            completed_at = CASE
            WHEN attempts >= max_attempts THEN NOW()
            ELSE NULL
            END
        WHERE id = $1
        ",
    )
    .bind(task_id)
    .bind(error)
    .execute(pool)
    .await?;
    Ok(())
}
/// Get task by ID
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get(pool: &PgPool, task_id: Uuid) -> Result<NotificationTask> {
let task = sqlx::query_as::<_, NotificationTask>(
r"
SELECT * FROM notification_tasks WHERE id = $1
",
)
.bind(task_id)
.fetch_one(pool)
.await?;
Ok(task)
}
/// Delete completed or permanently failed tasks older than the retention
/// window, returning how many rows were removed.
///
/// A row is reaped when either its `completed_at` or its `created_at`
/// falls outside `retention_days`.
///
/// # Errors
///
/// Returns error if database delete fails.
pub async fn cleanup_old_tasks(
    pool: &PgPool,
    retention_days: i64,
) -> Result<u64> {
    let outcome = sqlx::query(
        r"
        DELETE FROM notification_tasks
        WHERE status IN ('completed', 'failed')
          AND (completed_at < NOW() - ($1 || ' days')::interval
               OR created_at < NOW() - ($1 || ' days')::interval)
        ",
    )
    .bind(retention_days)
    .execute(pool)
    .await?;
    Ok(outcome.rows_affected())
}
/// Number of tasks currently awaiting delivery (monitoring metric).
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count_pending(pool: &PgPool) -> Result<i64> {
    let (pending,): (i64,) = sqlx::query_as(
        r"
        SELECT COUNT(*) FROM notification_tasks WHERE status = 'pending'
        ",
    )
    .fetch_one(pool)
    .await?;
    Ok(pending)
}
/// Number of tasks that exhausted their retries (monitoring metric).
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count_failed(pool: &PgPool) -> Result<i64> {
    let (failed,): (i64,) = sqlx::query_as(
        r"
        SELECT COUNT(*) FROM notification_tasks WHERE status = 'failed'
        ",
    )
    .fetch_one(pool)
    .await?;
    Ok(failed)
}

View file

@ -12,6 +12,10 @@ use crate::{
}; };
/// Add a member to a project with role validation /// Add a member to a project with role validation
///
/// # Errors
///
/// Returns error if validation fails or database insert fails.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -43,6 +47,10 @@ pub async fn create(
} }
/// Get a project member by ID /// Get a project member by ID
///
/// # Errors
///
/// Returns error if database query fails or member not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<ProjectMember> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<ProjectMember> {
sqlx::query_as::<_, ProjectMember>( sqlx::query_as::<_, ProjectMember>(
"SELECT * FROM project_members WHERE id = $1", "SELECT * FROM project_members WHERE id = $1",
@ -61,6 +69,10 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<ProjectMember> {
} }
/// Get a project member by project and user /// Get a project member by project and user
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_project_and_user( pub async fn get_by_project_and_user(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -77,6 +89,10 @@ pub async fn get_by_project_and_user(
} }
/// List all members of a project /// List all members of a project
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_project( pub async fn list_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -91,6 +107,10 @@ pub async fn list_for_project(
} }
/// List all projects a user is a member of /// List all projects a user is a member of
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_user( pub async fn list_for_user(
pool: &PgPool, pool: &PgPool,
user_id: Uuid, user_id: Uuid,
@ -105,6 +125,10 @@ pub async fn list_for_user(
} }
/// Update a project member's role with validation /// Update a project member's role with validation
///
/// # Errors
///
/// Returns error if validation fails or database update fails.
pub async fn update( pub async fn update(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -135,6 +159,10 @@ pub async fn update(
} }
/// Remove a member from a project /// Remove a member from a project
///
/// # Errors
///
/// Returns error if database delete fails or member not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM project_members WHERE id = $1") let result = sqlx::query("DELETE FROM project_members WHERE id = $1")
.bind(id) .bind(id)
@ -147,6 +175,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Remove a specific user from a project /// Remove a specific user from a project
///
/// # Errors
///
/// Returns error if database delete fails or user not found.
pub async fn delete_by_project_and_user( pub async fn delete_by_project_and_user(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -168,6 +200,10 @@ pub async fn delete_by_project_and_user(
} }
/// Check if a user has a specific role or higher in a project /// Check if a user has a specific role or higher in a project
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn check_permission( pub async fn check_permission(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -186,6 +222,10 @@ pub async fn check_permission(
} }
/// Upsert a project member (insert or update on conflict). /// Upsert a project member (insert or update on conflict).
///
/// # Errors
///
/// Returns error if validation fails or database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -211,6 +251,10 @@ pub async fn upsert(
/// Sync project members from declarative config. /// Sync project members from declarative config.
/// Deletes members not in the declarative list and upserts those that are. /// Deletes members not in the declarative list and upserts those that are.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn sync_for_project( pub async fn sync_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,

View file

@ -6,6 +6,11 @@ use crate::{
models::{CreateProject, Project, UpdateProject}, models::{CreateProject, Project, UpdateProject},
}; };
/// Create a new project.
///
/// # Errors
///
/// Returns error if database insert fails or project name already exists.
pub async fn create(pool: &PgPool, input: CreateProject) -> Result<Project> { pub async fn create(pool: &PgPool, input: CreateProject) -> Result<Project> {
sqlx::query_as::<_, Project>( sqlx::query_as::<_, Project>(
"INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, \ "INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, \
@ -26,6 +31,11 @@ pub async fn create(pool: &PgPool, input: CreateProject) -> Result<Project> {
}) })
} }
/// Get a project by ID.
///
/// # Errors
///
/// Returns error if database query fails or project not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<Project> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<Project> {
sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE id = $1") sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE id = $1")
.bind(id) .bind(id)
@ -34,6 +44,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<Project> {
.ok_or_else(|| CiError::NotFound(format!("Project {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Project {id} not found")))
} }
/// Get a project by name.
///
/// # Errors
///
/// Returns error if database query fails or project not found.
pub async fn get_by_name(pool: &PgPool, name: &str) -> Result<Project> { pub async fn get_by_name(pool: &PgPool, name: &str) -> Result<Project> {
sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE name = $1") sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE name = $1")
.bind(name) .bind(name)
@ -42,6 +57,11 @@ pub async fn get_by_name(pool: &PgPool, name: &str) -> Result<Project> {
.ok_or_else(|| CiError::NotFound(format!("Project '{name}' not found"))) .ok_or_else(|| CiError::NotFound(format!("Project '{name}' not found")))
} }
/// List projects with pagination.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list( pub async fn list(
pool: &PgPool, pool: &PgPool,
limit: i64, limit: i64,
@ -57,6 +77,11 @@ pub async fn list(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Count total number of projects.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count(pool: &PgPool) -> Result<i64> { pub async fn count(pool: &PgPool) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM projects") let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM projects")
.fetch_one(pool) .fetch_one(pool)
@ -65,12 +90,17 @@ pub async fn count(pool: &PgPool) -> Result<i64> {
Ok(row.0) Ok(row.0)
} }
/// Update a project with partial fields.
///
/// # Errors
///
/// Returns error if database update fails or project not found.
pub async fn update( pub async fn update(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
input: UpdateProject, input: UpdateProject,
) -> Result<Project> { ) -> Result<Project> {
// Build dynamic update — only set provided fields // Dynamic update - only set provided fields
let existing = get(pool, id).await?; let existing = get(pool, id).await?;
let name = input.name.unwrap_or(existing.name); let name = input.name.unwrap_or(existing.name);
@ -97,6 +127,11 @@ pub async fn update(
}) })
} }
/// Insert or update a project by name.
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert(pool: &PgPool, input: CreateProject) -> Result<Project> { pub async fn upsert(pool: &PgPool, input: CreateProject) -> Result<Project> {
sqlx::query_as::<_, Project>( sqlx::query_as::<_, Project>(
"INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, \ "INSERT INTO projects (name, description, repository_url) VALUES ($1, $2, \
@ -111,6 +146,11 @@ pub async fn upsert(pool: &PgPool, input: CreateProject) -> Result<Project> {
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Delete a project by ID.
///
/// # Errors
///
/// Returns error if database delete fails or project not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM projects WHERE id = $1") let result = sqlx::query("DELETE FROM projects WHERE id = $1")
.bind(id) .bind(id)

View file

@ -7,6 +7,11 @@ use crate::{
models::{CreateRemoteBuilder, RemoteBuilder}, models::{CreateRemoteBuilder, RemoteBuilder},
}; };
/// Create a new remote builder.
///
/// # Errors
///
/// Returns error if database insert fails or builder already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
input: CreateRemoteBuilder, input: CreateRemoteBuilder,
@ -40,6 +45,11 @@ pub async fn create(
}) })
} }
/// Get a remote builder by ID.
///
/// # Errors
///
/// Returns error if database query fails or builder not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE id = $1", "SELECT * FROM remote_builders WHERE id = $1",
@ -50,6 +60,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
} }
/// List all remote builders.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list(pool: &PgPool) -> Result<Vec<RemoteBuilder>> { pub async fn list(pool: &PgPool) -> Result<Vec<RemoteBuilder>> {
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders ORDER BY speed_factor DESC, name", "SELECT * FROM remote_builders ORDER BY speed_factor DESC, name",
@ -59,6 +74,11 @@ pub async fn list(pool: &PgPool) -> Result<Vec<RemoteBuilder>> {
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// List all enabled remote builders.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_enabled(pool: &PgPool) -> Result<Vec<RemoteBuilder>> { pub async fn list_enabled(pool: &PgPool) -> Result<Vec<RemoteBuilder>> {
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"SELECT * FROM remote_builders WHERE enabled = true ORDER BY speed_factor \ "SELECT * FROM remote_builders WHERE enabled = true ORDER BY speed_factor \
@ -71,6 +91,10 @@ pub async fn list_enabled(pool: &PgPool) -> Result<Vec<RemoteBuilder>> {
/// Find a suitable builder for the given system. /// Find a suitable builder for the given system.
/// Excludes builders that are temporarily disabled due to consecutive failures. /// Excludes builders that are temporarily disabled due to consecutive failures.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn find_for_system( pub async fn find_for_system(
pool: &PgPool, pool: &PgPool,
system: &str, system: &str,
@ -87,9 +111,14 @@ pub async fn find_for_system(
} }
/// Record a build failure for a remote builder. /// Record a build failure for a remote builder.
/// Increments consecutive_failures (capped at 4), sets last_failure, ///
/// and computes disabled_until with exponential backoff. /// Increments `consecutive_failures` (capped at 4), sets `last_failure`,
/// and computes `disabled_until` with exponential backoff.
/// Backoff formula (from Hydra): delta = 60 * 3^(min(failures, 4) - 1) seconds. /// Backoff formula (from Hydra): delta = 60 * 3^(min(failures, 4) - 1) seconds.
///
/// # Errors
///
/// Returns error if database update fails or builder not found.
pub async fn record_failure(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> { pub async fn record_failure(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"UPDATE remote_builders SET consecutive_failures = \ "UPDATE remote_builders SET consecutive_failures = \
@ -105,7 +134,11 @@ pub async fn record_failure(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
} }
/// Record a build success for a remote builder. /// Record a build success for a remote builder.
/// Resets consecutive_failures and clears disabled_until. /// Resets `consecutive_failures` and clears `disabled_until`.
///
/// # Errors
///
/// Returns error if database update fails or builder not found.
pub async fn record_success(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> { pub async fn record_success(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"UPDATE remote_builders SET consecutive_failures = 0, disabled_until = \ "UPDATE remote_builders SET consecutive_failures = 0, disabled_until = \
@ -117,12 +150,17 @@ pub async fn record_success(pool: &PgPool, id: Uuid) -> Result<RemoteBuilder> {
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
} }
/// Update a remote builder with partial fields.
///
/// # Errors
///
/// Returns error if database update fails or builder not found.
pub async fn update( pub async fn update(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
input: crate::models::UpdateRemoteBuilder, input: crate::models::UpdateRemoteBuilder,
) -> Result<RemoteBuilder> { ) -> Result<RemoteBuilder> {
// Build dynamic update — use COALESCE pattern // Dynamic update using COALESCE pattern
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"UPDATE remote_builders SET name = COALESCE($1, name), ssh_uri = \ "UPDATE remote_builders SET name = COALESCE($1, name), ssh_uri = \
COALESCE($2, ssh_uri), systems = COALESCE($3, systems), max_jobs = \ COALESCE($2, ssh_uri), systems = COALESCE($3, systems), max_jobs = \
@ -148,6 +186,11 @@ pub async fn update(
.ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Remote builder {id} not found")))
} }
/// Delete a remote builder.
///
/// # Errors
///
/// Returns error if database delete fails or builder not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM remote_builders WHERE id = $1") let result = sqlx::query("DELETE FROM remote_builders WHERE id = $1")
.bind(id) .bind(id)
@ -160,6 +203,11 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
Ok(()) Ok(())
} }
/// Count total remote builders.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count(pool: &PgPool) -> Result<i64> { pub async fn count(pool: &PgPool) -> Result<i64> {
let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM remote_builders") let row: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM remote_builders")
.fetch_one(pool) .fetch_one(pool)
@ -169,18 +217,13 @@ pub async fn count(pool: &PgPool) -> Result<i64> {
} }
/// Upsert a remote builder (insert or update on conflict by name). /// Upsert a remote builder (insert or update on conflict by name).
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
name: &str, params: &crate::models::RemoteBuilderParams<'_>,
ssh_uri: &str,
systems: &[String],
max_jobs: i32,
speed_factor: i32,
supported_features: &[String],
mandatory_features: &[String],
enabled: bool,
public_host_key: Option<&str>,
ssh_key_file: Option<&str>,
) -> Result<RemoteBuilder> { ) -> Result<RemoteBuilder> {
sqlx::query_as::<_, RemoteBuilder>( sqlx::query_as::<_, RemoteBuilder>(
"INSERT INTO remote_builders (name, ssh_uri, systems, max_jobs, \ "INSERT INTO remote_builders (name, ssh_uri, systems, max_jobs, \
@ -194,16 +237,16 @@ pub async fn upsert(
remote_builders.public_host_key), ssh_key_file = \ remote_builders.public_host_key), ssh_key_file = \
COALESCE(EXCLUDED.ssh_key_file, remote_builders.ssh_key_file) RETURNING *", COALESCE(EXCLUDED.ssh_key_file, remote_builders.ssh_key_file) RETURNING *",
) )
.bind(name) .bind(params.name)
.bind(ssh_uri) .bind(params.ssh_uri)
.bind(systems) .bind(params.systems)
.bind(max_jobs) .bind(params.max_jobs)
.bind(speed_factor) .bind(params.speed_factor)
.bind(supported_features) .bind(params.supported_features)
.bind(mandatory_features) .bind(params.mandatory_features)
.bind(enabled) .bind(params.enabled)
.bind(public_host_key) .bind(params.public_host_key)
.bind(ssh_key_file) .bind(params.ssh_key_file)
.fetch_one(pool) .fetch_one(pool)
.await .await
.map_err(CiError::Database) .map_err(CiError::Database)
@ -211,6 +254,10 @@ pub async fn upsert(
/// Sync remote builders from declarative config. /// Sync remote builders from declarative config.
/// Deletes builders not in the declarative list and upserts those that are. /// Deletes builders not in the declarative list and upserts those that are.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn sync_all( pub async fn sync_all(
pool: &PgPool, pool: &PgPool,
builders: &[DeclarativeRemoteBuilder], builders: &[DeclarativeRemoteBuilder],
@ -227,20 +274,19 @@ pub async fn sync_all(
// Upsert each builder // Upsert each builder
for builder in builders { for builder in builders {
upsert( let params = crate::models::RemoteBuilderParams {
pool, name: &builder.name,
&builder.name, ssh_uri: &builder.ssh_uri,
&builder.ssh_uri, systems: &builder.systems,
&builder.systems, max_jobs: builder.max_jobs,
builder.max_jobs, speed_factor: builder.speed_factor,
builder.speed_factor, supported_features: &builder.supported_features,
&builder.supported_features, mandatory_features: &builder.mandatory_features,
&builder.mandatory_features, enabled: builder.enabled,
builder.enabled, public_host_key: builder.public_host_key.as_deref(),
builder.public_host_key.as_deref(), ssh_key_file: builder.ssh_key_file.as_deref(),
builder.ssh_key_file.as_deref(), };
) upsert(pool, &params).await?;
.await?;
} }
Ok(()) Ok(())

View file

@ -146,6 +146,10 @@ pub struct SearchResults {
} }
/// Execute a comprehensive search across all entities /// Execute a comprehensive search across all entities
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn search( pub async fn search(
pool: &PgPool, pool: &PgPool,
params: &SearchParams, params: &SearchParams,
@ -511,6 +515,10 @@ async fn search_builds(
} }
/// Quick search - simple text search across entities /// Quick search - simple text search across entities
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn quick_search( pub async fn quick_search(
pool: &PgPool, pool: &PgPool,
query: &str, query: &str,

View file

@ -9,6 +9,10 @@ use crate::{
}; };
/// Create a new starred job /// Create a new starred job
///
/// # Errors
///
/// Returns error if database insert fails or job already starred.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
user_id: Uuid, user_id: Uuid,
@ -35,6 +39,10 @@ pub async fn create(
} }
/// Get a starred job by ID /// Get a starred job by ID
///
/// # Errors
///
/// Returns error if database query fails or starred job not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<StarredJob> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<StarredJob> {
sqlx::query_as::<_, StarredJob>("SELECT * FROM starred_jobs WHERE id = $1") sqlx::query_as::<_, StarredJob>("SELECT * FROM starred_jobs WHERE id = $1")
.bind(id) .bind(id)
@ -51,6 +59,10 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<StarredJob> {
} }
/// List starred jobs for a user with pagination /// List starred jobs for a user with pagination
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_user( pub async fn list_for_user(
pool: &PgPool, pool: &PgPool,
user_id: Uuid, user_id: Uuid,
@ -70,6 +82,10 @@ pub async fn list_for_user(
} }
/// Count starred jobs for a user /// Count starred jobs for a user
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count_for_user(pool: &PgPool, user_id: Uuid) -> Result<i64> { pub async fn count_for_user(pool: &PgPool, user_id: Uuid) -> Result<i64> {
let (count,): (i64,) = let (count,): (i64,) =
sqlx::query_as("SELECT COUNT(*) FROM starred_jobs WHERE user_id = $1") sqlx::query_as("SELECT COUNT(*) FROM starred_jobs WHERE user_id = $1")
@ -80,6 +96,10 @@ pub async fn count_for_user(pool: &PgPool, user_id: Uuid) -> Result<i64> {
} }
/// Check if a user has starred a specific job /// Check if a user has starred a specific job
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn is_starred( pub async fn is_starred(
pool: &PgPool, pool: &PgPool,
user_id: Uuid, user_id: Uuid,
@ -101,6 +121,10 @@ pub async fn is_starred(
} }
/// Delete a starred job /// Delete a starred job
///
/// # Errors
///
/// Returns error if database delete fails or starred job not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM starred_jobs WHERE id = $1") let result = sqlx::query("DELETE FROM starred_jobs WHERE id = $1")
.bind(id) .bind(id)
@ -113,6 +137,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Delete a starred job by user and job details /// Delete a starred job by user and job details
///
/// # Errors
///
/// Returns error if database delete fails or starred job not found.
pub async fn delete_by_job( pub async fn delete_by_job(
pool: &PgPool, pool: &PgPool,
user_id: Uuid, user_id: Uuid,
@ -137,6 +165,10 @@ pub async fn delete_by_job(
} }
/// Delete all starred jobs for a user (when user is deleted) /// Delete all starred jobs for a user (when user is deleted)
///
/// # Errors
///
/// Returns error if database delete fails.
pub async fn delete_all_for_user(pool: &PgPool, user_id: Uuid) -> Result<()> { pub async fn delete_all_for_user(pool: &PgPool, user_id: Uuid) -> Result<()> {
sqlx::query("DELETE FROM starred_jobs WHERE user_id = $1") sqlx::query("DELETE FROM starred_jobs WHERE user_id = $1")
.bind(user_id) .bind(user_id)

View file

@ -17,6 +17,10 @@ use crate::{
}; };
/// Hash a password using argon2id /// Hash a password using argon2id
///
/// # Errors
///
/// Returns error if password hashing fails.
pub fn hash_password(password: &str) -> Result<String> { pub fn hash_password(password: &str) -> Result<String> {
use argon2::{ use argon2::{
Argon2, Argon2,
@ -33,6 +37,10 @@ pub fn hash_password(password: &str) -> Result<String> {
} }
/// Verify a password against a hash /// Verify a password against a hash
///
/// # Errors
///
/// Returns error if password hash parsing fails.
pub fn verify_password(password: &str, hash: &str) -> Result<bool> { pub fn verify_password(password: &str, hash: &str) -> Result<bool> {
use argon2::{Argon2, PasswordHash, PasswordVerifier}; use argon2::{Argon2, PasswordHash, PasswordVerifier};
@ -47,6 +55,10 @@ pub fn verify_password(password: &str, hash: &str) -> Result<bool> {
} }
/// Create a new user with validation /// Create a new user with validation
///
/// # Errors
///
/// Returns error if validation fails or database insert fails.
pub async fn create(pool: &PgPool, data: &CreateUser) -> Result<User> { pub async fn create(pool: &PgPool, data: &CreateUser) -> Result<User> {
// Validate username // Validate username
validate_username(&data.username) validate_username(&data.username)
@ -94,6 +106,10 @@ pub async fn create(pool: &PgPool, data: &CreateUser) -> Result<User> {
} }
/// Authenticate a user with username and password /// Authenticate a user with username and password
///
/// # Errors
///
/// Returns error if credentials are invalid or database query fails.
pub async fn authenticate( pub async fn authenticate(
pool: &PgPool, pool: &PgPool,
creds: &LoginCredentials, creds: &LoginCredentials,
@ -129,6 +145,10 @@ pub async fn authenticate(
} }
/// Get a user by ID /// Get a user by ID
///
/// # Errors
///
/// Returns error if database query fails or user not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<User> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<User> {
sqlx::query_as::<_, User>("SELECT * FROM users WHERE id = $1") sqlx::query_as::<_, User>("SELECT * FROM users WHERE id = $1")
.bind(id) .bind(id)
@ -145,6 +165,10 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<User> {
} }
/// Get a user by username /// Get a user by username
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_username( pub async fn get_by_username(
pool: &PgPool, pool: &PgPool,
username: &str, username: &str,
@ -157,6 +181,10 @@ pub async fn get_by_username(
} }
/// Get a user by email /// Get a user by email
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_email(pool: &PgPool, email: &str) -> Result<Option<User>> { pub async fn get_by_email(pool: &PgPool, email: &str) -> Result<Option<User>> {
sqlx::query_as::<_, User>("SELECT * FROM users WHERE email = $1") sqlx::query_as::<_, User>("SELECT * FROM users WHERE email = $1")
.bind(email) .bind(email)
@ -166,6 +194,10 @@ pub async fn get_by_email(pool: &PgPool, email: &str) -> Result<Option<User>> {
} }
/// List all users with pagination /// List all users with pagination
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list(pool: &PgPool, limit: i64, offset: i64) -> Result<Vec<User>> { pub async fn list(pool: &PgPool, limit: i64, offset: i64) -> Result<Vec<User>> {
sqlx::query_as::<_, User>( sqlx::query_as::<_, User>(
"SELECT * FROM users ORDER BY created_at DESC LIMIT $1 OFFSET $2", "SELECT * FROM users ORDER BY created_at DESC LIMIT $1 OFFSET $2",
@ -178,6 +210,10 @@ pub async fn list(pool: &PgPool, limit: i64, offset: i64) -> Result<Vec<User>> {
} }
/// Count total users /// Count total users
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn count(pool: &PgPool) -> Result<i64> { pub async fn count(pool: &PgPool) -> Result<i64> {
let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users") let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users")
.fetch_one(pool) .fetch_one(pool)
@ -186,6 +222,10 @@ pub async fn count(pool: &PgPool) -> Result<i64> {
} }
/// Update a user with the provided data /// Update a user with the provided data
///
/// # Errors
///
/// Returns error if validation fails or database update fails.
pub async fn update( pub async fn update(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -220,6 +260,10 @@ pub async fn update(
} }
/// Update user email with validation /// Update user email with validation
///
/// # Errors
///
/// Returns error if validation fails or database update fails.
pub async fn update_email( pub async fn update_email(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -245,6 +289,10 @@ pub async fn update_email(
} }
/// Update user full name with validation /// Update user full name with validation
///
/// # Errors
///
/// Returns error if validation fails or database update fails.
pub async fn update_full_name( pub async fn update_full_name(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -263,6 +311,10 @@ pub async fn update_full_name(
} }
/// Update user password with validation /// Update user password with validation
///
/// # Errors
///
/// Returns error if validation fails or database update fails.
pub async fn update_password( pub async fn update_password(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -281,6 +333,10 @@ pub async fn update_password(
} }
/// Update user role with validation /// Update user role with validation
///
/// # Errors
///
/// Returns error if validation fails or database update fails.
pub async fn update_role(pool: &PgPool, id: Uuid, role: &str) -> Result<()> { pub async fn update_role(pool: &PgPool, id: Uuid, role: &str) -> Result<()> {
validate_role(role, VALID_ROLES) validate_role(role, VALID_ROLES)
.map_err(|e| CiError::Validation(e.to_string()))?; .map_err(|e| CiError::Validation(e.to_string()))?;
@ -294,6 +350,10 @@ pub async fn update_role(pool: &PgPool, id: Uuid, role: &str) -> Result<()> {
} }
/// Enable/disable user /// Enable/disable user
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn set_enabled(pool: &PgPool, id: Uuid, enabled: bool) -> Result<()> { pub async fn set_enabled(pool: &PgPool, id: Uuid, enabled: bool) -> Result<()> {
sqlx::query("UPDATE users SET enabled = $1 WHERE id = $2") sqlx::query("UPDATE users SET enabled = $1 WHERE id = $2")
.bind(enabled) .bind(enabled)
@ -304,6 +364,10 @@ pub async fn set_enabled(pool: &PgPool, id: Uuid, enabled: bool) -> Result<()> {
} }
/// Set public dashboard preference /// Set public dashboard preference
///
/// # Errors
///
/// Returns error if database update fails.
pub async fn set_public_dashboard( pub async fn set_public_dashboard(
pool: &PgPool, pool: &PgPool,
id: Uuid, id: Uuid,
@ -318,6 +382,10 @@ pub async fn set_public_dashboard(
} }
/// Delete a user /// Delete a user
///
/// # Errors
///
/// Returns error if database delete fails or user not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM users WHERE id = $1") let result = sqlx::query("DELETE FROM users WHERE id = $1")
.bind(id) .bind(id)
@ -330,6 +398,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Create or update OAuth user /// Create or update OAuth user
///
/// # Errors
///
/// Returns error if validation fails or database operation fails.
pub async fn upsert_oauth_user( pub async fn upsert_oauth_user(
pool: &PgPool, pool: &PgPool,
username: &str, username: &str,
@ -399,6 +471,10 @@ pub async fn upsert_oauth_user(
} }
/// Create a new session for a user. Returns (`session_token`, `session_id`). /// Create a new session for a user. Returns (`session_token`, `session_id`).
///
/// # Errors
///
/// Returns error if database insert fails.
pub async fn create_session( pub async fn create_session(
pool: &PgPool, pool: &PgPool,
user_id: Uuid, user_id: Uuid,
@ -427,6 +503,10 @@ pub async fn create_session(
} }
/// Validate a session token and return the associated user if valid. /// Validate a session token and return the associated user if valid.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn validate_session( pub async fn validate_session(
pool: &PgPool, pool: &PgPool,
token: &str, token: &str,
@ -444,8 +524,8 @@ pub async fn validate_session(
.await?; .await?;
// Update last_used_at // Update last_used_at
if result.is_some() { if result.is_some()
if let Err(e) = sqlx::query( && let Err(e) = sqlx::query(
"UPDATE user_sessions SET last_used_at = NOW() WHERE session_token_hash \ "UPDATE user_sessions SET last_used_at = NOW() WHERE session_token_hash \
= $1", = $1",
) )
@ -455,7 +535,6 @@ pub async fn validate_session(
{ {
tracing::warn!(token_hash = %token_hash, "Failed to update session last_used_at: {e}"); tracing::warn!(token_hash = %token_hash, "Failed to update session last_used_at: {e}");
} }
}
Ok(result) Ok(result)
} }

View file

@ -7,6 +7,11 @@ use crate::{
models::{CreateWebhookConfig, WebhookConfig}, models::{CreateWebhookConfig, WebhookConfig},
}; };
/// Create a new webhook config.
///
/// # Errors
///
/// Returns error if database insert fails or config already exists.
pub async fn create( pub async fn create(
pool: &PgPool, pool: &PgPool,
input: CreateWebhookConfig, input: CreateWebhookConfig,
@ -34,6 +39,11 @@ pub async fn create(
}) })
} }
/// Get a webhook config by ID.
///
/// # Errors
///
/// Returns error if database query fails or config not found.
pub async fn get(pool: &PgPool, id: Uuid) -> Result<WebhookConfig> { pub async fn get(pool: &PgPool, id: Uuid) -> Result<WebhookConfig> {
sqlx::query_as::<_, WebhookConfig>( sqlx::query_as::<_, WebhookConfig>(
"SELECT * FROM webhook_configs WHERE id = $1", "SELECT * FROM webhook_configs WHERE id = $1",
@ -44,6 +54,11 @@ pub async fn get(pool: &PgPool, id: Uuid) -> Result<WebhookConfig> {
.ok_or_else(|| CiError::NotFound(format!("Webhook config {id} not found"))) .ok_or_else(|| CiError::NotFound(format!("Webhook config {id} not found")))
} }
/// List all webhook configs for a project.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn list_for_project( pub async fn list_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -58,6 +73,11 @@ pub async fn list_for_project(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Get a webhook config by project and forge type.
///
/// # Errors
///
/// Returns error if database query fails.
pub async fn get_by_project_and_forge( pub async fn get_by_project_and_forge(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -74,6 +94,11 @@ pub async fn get_by_project_and_forge(
.map_err(CiError::Database) .map_err(CiError::Database)
} }
/// Delete a webhook config.
///
/// # Errors
///
/// Returns error if database delete fails or config not found.
pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> { pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
let result = sqlx::query("DELETE FROM webhook_configs WHERE id = $1") let result = sqlx::query("DELETE FROM webhook_configs WHERE id = $1")
.bind(id) .bind(id)
@ -86,6 +111,10 @@ pub async fn delete(pool: &PgPool, id: Uuid) -> Result<()> {
} }
/// Upsert a webhook config (insert or update on conflict). /// Upsert a webhook config (insert or update on conflict).
///
/// # Errors
///
/// Returns error if database operation fails.
pub async fn upsert( pub async fn upsert(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,
@ -110,6 +139,10 @@ pub async fn upsert(
/// Sync webhook configs from declarative config. /// Sync webhook configs from declarative config.
/// Deletes configs not in the declarative list and upserts those that are. /// Deletes configs not in the declarative list and upserts those that are.
///
/// # Errors
///
/// Returns error if database operations fail.
pub async fn sync_for_project( pub async fn sync_for_project(
pool: &PgPool, pool: &PgPool,
project_id: Uuid, project_id: Uuid,

View file

@ -82,15 +82,13 @@ fn is_internal_host(host: &str) -> bool {
return true; return true;
} }
// Block 172.16-31.x.x // Block 172.16-31.x.x
if host.starts_with("172.") { if host.starts_with("172.")
if let Some(second_octet) = host.split('.').nth(1) { && let Some(second_octet) = host.split('.').nth(1)
if let Ok(n) = second_octet.parse::<u8>() { && let Ok(n) = second_octet.parse::<u8>()
if (16..=31).contains(&n) { && (16..=31).contains(&n)
{
return true; return true;
} }
}
}
}
// Block 192.168.x.x // Block 192.168.x.x
if host.starts_with("192.168.") { if host.starts_with("192.168.") {
return true; return true;
@ -100,6 +98,11 @@ fn is_internal_host(host: &str) -> bool {
/// Trait for validating request DTOs before persisting. /// Trait for validating request DTOs before persisting.
pub trait Validate { pub trait Validate {
/// Validate the DTO.
///
/// # Errors
///
/// Returns error if validation fails.
fn validate(&self) -> Result<(), String>; fn validate(&self) -> Result<(), String>;
} }
@ -129,19 +132,23 @@ fn validate_repository_url(url: &str) -> Result<(), String> {
); );
} }
// Reject URLs targeting common internal/metadata endpoints // Reject URLs targeting common internal/metadata endpoints
if let Some(host) = extract_host_from_url(url) { if let Some(host) = extract_host_from_url(url)
if is_internal_host(&host) { && is_internal_host(&host)
{
return Err( return Err(
"repository_url must not target internal or metadata addresses" "repository_url must not target internal or metadata addresses"
.to_string(), .to_string(),
); );
} }
}
Ok(()) Ok(())
} }
/// Validate that a URL uses one of the allowed schemes. /// Validate that a URL uses one of the allowed schemes.
/// Logs a warning when insecure schemes (`file`, `http`) are used. /// Logs a warning when insecure schemes (`file`, `http`) are used.
///
/// # Errors
///
/// Returns error if URL scheme is not in the allowed list.
pub fn validate_url_scheme( pub fn validate_url_scheme(
url: &str, url: &str,
allowed_schemes: &[String], allowed_schemes: &[String],
@ -187,6 +194,11 @@ fn validate_description(desc: &str) -> Result<(), String> {
Ok(()) Ok(())
} }
/// Validate nix expression format.
///
/// # Errors
///
/// Returns error if expression contains invalid characters or path traversal.
pub fn validate_nix_expression(expr: &str) -> Result<(), String> { pub fn validate_nix_expression(expr: &str) -> Result<(), String> {
if expr.is_empty() { if expr.is_empty() {
return Err("nix_expression cannot be empty".to_string()); return Err("nix_expression cannot be empty".to_string());
@ -465,7 +477,7 @@ mod tests {
#[test] #[test]
fn store_path_rejects_just_prefix() { fn store_path_rejects_just_prefix() {
// "/nix/store/" alone has no hash, but structurally starts_with and has no // "/nix/store/" alone has no hash, but structurally starts_with and has no
// .., so it passes. This is fine the DB lookup won't find anything // .., so it passes. This is fine - the DB lookup won't find anything
// for it. // for it.
assert!(is_valid_store_path("/nix/store/")); assert!(is_valid_store_path("/nix/store/"));
} }
@ -554,7 +566,7 @@ mod tests {
#[test] #[test]
fn test_create_project_invalid_name() { fn test_create_project_invalid_name() {
let p = CreateProject { let p = CreateProject {
name: "".to_string(), name: String::new(),
description: None, description: None,
repository_url: "https://github.com/test/repo".to_string(), repository_url: "https://github.com/test/repo".to_string(),
}; };

View file

@ -34,6 +34,10 @@ impl std::error::Error for ValidationError {}
/// Requirements: /// Requirements:
/// - 3-32 characters /// - 3-32 characters
/// - Alphanumeric, underscore, hyphen only /// - Alphanumeric, underscore, hyphen only
///
/// # Errors
///
/// Returns error if username format is invalid.
pub fn validate_username(username: &str) -> Result<(), ValidationError> { pub fn validate_username(username: &str) -> Result<(), ValidationError> {
if username.is_empty() { if username.is_empty() {
return Err(ValidationError { return Err(ValidationError {
@ -55,6 +59,10 @@ pub fn validate_username(username: &str) -> Result<(), ValidationError> {
} }
/// Validate email format /// Validate email format
///
/// # Errors
///
/// Returns error if email format is invalid.
pub fn validate_email(email: &str) -> Result<(), ValidationError> { pub fn validate_email(email: &str) -> Result<(), ValidationError> {
if email.is_empty() { if email.is_empty() {
return Err(ValidationError { return Err(ValidationError {
@ -80,6 +88,10 @@ pub fn validate_email(email: &str) -> Result<(), ValidationError> {
/// - At least one lowercase letter /// - At least one lowercase letter
/// - At least one number /// - At least one number
/// - At least one special character /// - At least one special character
///
/// # Errors
///
/// Returns error if password does not meet requirements.
pub fn validate_password(password: &str) -> Result<(), ValidationError> { pub fn validate_password(password: &str) -> Result<(), ValidationError> {
if password.len() < 12 { if password.len() < 12 {
return Err(ValidationError { return Err(ValidationError {
@ -128,6 +140,10 @@ pub fn validate_password(password: &str) -> Result<(), ValidationError> {
} }
/// Validate role against allowed roles /// Validate role against allowed roles
///
/// # Errors
///
/// Returns error if role is not in the allowed list.
pub fn validate_role( pub fn validate_role(
role: &str, role: &str,
allowed: &[&str], allowed: &[&str],
@ -152,6 +168,10 @@ pub fn validate_role(
/// Validate full name (optional field) /// Validate full name (optional field)
/// - Max 255 characters /// - Max 255 characters
/// - Must not contain control characters /// - Must not contain control characters
///
/// # Errors
///
/// Returns error if full name contains invalid characters or is too long.
pub fn validate_full_name(name: &str) -> Result<(), ValidationError> { pub fn validate_full_name(name: &str) -> Result<(), ValidationError> {
if name.len() > 255 { if name.len() > 255 {
return Err(ValidationError { return Err(ValidationError {
@ -174,6 +194,10 @@ pub fn validate_full_name(name: &str) -> Result<(), ValidationError> {
/// Requirements: /// Requirements:
/// - 1-255 characters /// - 1-255 characters
/// - Alphanumeric + common path characters /// - Alphanumeric + common path characters
///
/// # Errors
///
/// Returns error if job name format is invalid.
pub fn validate_job_name(name: &str) -> Result<(), ValidationError> { pub fn validate_job_name(name: &str) -> Result<(), ValidationError> {
if name.is_empty() { if name.is_empty() {
return Err(ValidationError { return Err(ValidationError {

View file

@ -21,8 +21,7 @@ async fn test_database_connection() -> anyhow::Result<()> {
Err(e) => { Err(e) => {
println!( println!(
"Skipping test_database_connection: no PostgreSQL instance available \ "Skipping test_database_connection: no PostgreSQL instance available \
- {}", - {e}"
e
); );
return Ok(()); return Ok(());
}, },
@ -38,7 +37,7 @@ async fn test_database_connection() -> anyhow::Result<()> {
assert!(!info.version.is_empty()); assert!(!info.version.is_empty());
// Test pool stats // Test pool stats
let stats = db.get_pool_stats().await; let stats = db.get_pool_stats();
assert!(stats.size >= 1); assert!(stats.size >= 1);
db.close().await; db.close().await;
@ -58,8 +57,7 @@ async fn test_database_health_check() -> anyhow::Result<()> {
Err(e) => { Err(e) => {
println!( println!(
"Skipping test_database_health_check: no PostgreSQL instance \ "Skipping test_database_health_check: no PostgreSQL instance \
available - {}", available - {e}"
e
); );
return Ok(()); return Ok(());
}, },
@ -83,8 +81,7 @@ async fn test_connection_info() -> anyhow::Result<()> {
Ok(pool) => pool, Ok(pool) => pool,
Err(e) => { Err(e) => {
println!( println!(
"Skipping test_connection_info: no PostgreSQL instance available - {}", "Skipping test_connection_info: no PostgreSQL instance available - {e}"
e
); );
return Ok(()); return Ok(());
}, },
@ -104,8 +101,7 @@ async fn test_connection_info() -> anyhow::Result<()> {
Ok(db) => db, Ok(db) => db,
Err(e) => { Err(e) => {
println!( println!(
"Skipping test_connection_info: database connection failed - {}", "Skipping test_connection_info: database connection failed - {e}"
e
); );
pool.close().await; pool.close().await;
return Ok(()); return Ok(());
@ -141,14 +137,13 @@ async fn test_pool_stats() -> anyhow::Result<()> {
Ok(db) => db, Ok(db) => db,
Err(e) => { Err(e) => {
println!( println!(
"Skipping test_pool_stats: no PostgreSQL instance available - {}", "Skipping test_pool_stats: no PostgreSQL instance available - {e}"
e
); );
return Ok(()); return Ok(());
}, },
}; };
let stats = db.get_pool_stats().await; let stats = db.get_pool_stats();
assert!(stats.size >= 1); assert!(stats.size >= 1);
assert!(stats.idle >= 1); assert!(stats.idle >= 1);
@ -173,12 +168,12 @@ async fn test_database_config_validation() -> anyhow::Result<()> {
assert!(config.validate().is_ok()); assert!(config.validate().is_ok());
// Invalid URL // Invalid URL
let mut config = config.clone(); let mut config = config;
config.url = "invalid://url".to_string(); config.url = "invalid://url".to_string();
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Empty URL // Empty URL
config.url = "".to_string(); config.url = String::new();
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Zero max connections // Zero max connections

View file

@ -20,12 +20,9 @@ async fn test_database_connection_full() -> anyhow::Result<()> {
}; };
// Try to connect, skip test if database is not available // Try to connect, skip test if database is not available
let db = match Database::new(config).await { let Ok(db) = Database::new(config).await else {
Ok(db) => db,
Err(_) => {
println!("Skipping database test: no PostgreSQL instance available"); println!("Skipping database test: no PostgreSQL instance available");
return Ok(()); return Ok(());
},
}; };
// Test health check // Test health check
@ -38,7 +35,7 @@ async fn test_database_connection_full() -> anyhow::Result<()> {
assert!(!info.version.is_empty()); assert!(!info.version.is_empty());
// Test pool stats // Test pool stats
let stats = db.get_pool_stats().await; let stats = db.get_pool_stats();
assert!(stats.size >= 1); assert!(stats.size >= 1);
assert!(stats.idle >= 1); assert!(stats.idle >= 1);
assert_eq!(stats.size, stats.idle + stats.active); assert_eq!(stats.size, stats.idle + stats.active);
@ -67,21 +64,21 @@ fn test_config_loading() -> anyhow::Result<()> {
#[test] #[test]
fn test_config_validation() -> anyhow::Result<()> { fn test_config_validation() -> anyhow::Result<()> {
// Test valid config // Test valid config
let config = Config::default(); let base_config = Config::default();
assert!(config.validate().is_ok()); assert!(base_config.validate().is_ok());
// Test invalid database URL // Test invalid database URL
let mut config = config.clone(); let mut config = base_config.clone();
config.database.url = "invalid://url".to_string(); config.database.url = "invalid://url".to_string();
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Test invalid port // Test invalid port
let mut config = config.clone(); let mut config = base_config.clone();
config.server.port = 0; config.server.port = 0;
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Test invalid connections // Test invalid connections
let mut config = config.clone(); let mut config = base_config.clone();
config.database.max_connections = 0; config.database.max_connections = 0;
assert!(config.validate().is_err()); assert!(config.validate().is_err());
@ -90,12 +87,12 @@ fn test_config_validation() -> anyhow::Result<()> {
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Test invalid evaluator settings // Test invalid evaluator settings
let mut config = config.clone(); let mut config = base_config.clone();
config.evaluator.poll_interval = 0; config.evaluator.poll_interval = 0;
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Test invalid queue runner settings // Test invalid queue runner settings
let mut config = config.clone(); let mut config = base_config;
config.queue_runner.workers = 0; config.queue_runner.workers = 0;
assert!(config.validate().is_err()); assert!(config.validate().is_err());
@ -109,12 +106,12 @@ fn test_database_config_validation() -> anyhow::Result<()> {
assert!(config.validate().is_ok()); assert!(config.validate().is_ok());
// Test invalid URL // Test invalid URL
let mut config = config.clone(); let mut config = config;
config.url = "invalid://url".to_string(); config.url = "invalid://url".to_string();
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Test empty URL // Test empty URL
config.url = "".to_string(); config.url = String::new();
assert!(config.validate().is_err()); assert!(config.validate().is_err());
// Test zero max connections // Test zero max connections

View file

@ -1,15 +1,12 @@
//! Integration tests for repository CRUD operations. //! Integration tests for repository CRUD operations.
//! Requires TEST_DATABASE_URL to be set to a PostgreSQL connection string. //! Requires `TEST_DATABASE_URL` to be set to a `PostgreSQL` connection string.
use fc_common::{models::*, repo}; use fc_common::{models::*, repo};
async fn get_pool() -> Option<sqlx::PgPool> { async fn get_pool() -> Option<sqlx::PgPool> {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping repo test: TEST_DATABASE_URL not set"); println!("Skipping repo test: TEST_DATABASE_URL not set");
return None; return None;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -85,7 +82,7 @@ async fn create_test_build(
evaluation_id: eval_id, evaluation_id: eval_id,
job_name: job_name.to_string(), job_name: job_name.to_string(),
drv_path: drv_path.to_string(), drv_path: drv_path.to_string(),
system: system.map(|s| s.to_string()), system: system.map(std::string::ToString::to_string),
outputs: None, outputs: None,
is_aggregate: None, is_aggregate: None,
constituents: None, constituents: None,
@ -98,9 +95,8 @@ async fn create_test_build(
#[tokio::test] #[tokio::test]
async fn test_project_crud() { async fn test_project_crud() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create // Create
@ -148,9 +144,8 @@ async fn test_project_crud() {
#[tokio::test] #[tokio::test]
async fn test_project_unique_constraint() { async fn test_project_unique_constraint() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let name = format!("unique-test-{}", uuid::Uuid::new_v4()); let name = format!("unique-test-{}", uuid::Uuid::new_v4());
@ -176,9 +171,8 @@ async fn test_project_unique_constraint() {
#[tokio::test] #[tokio::test]
async fn test_jobset_crud() { async fn test_jobset_crud() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "jobset").await; let project = create_test_project(&pool, "jobset").await;
@ -242,9 +236,8 @@ async fn test_jobset_crud() {
#[tokio::test] #[tokio::test]
async fn test_evaluation_and_build_lifecycle() { async fn test_evaluation_and_build_lifecycle() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Set up project and jobset // Set up project and jobset
@ -391,9 +384,8 @@ async fn test_evaluation_and_build_lifecycle() {
#[tokio::test] #[tokio::test]
async fn test_not_found_errors() { async fn test_not_found_errors() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let fake_id = uuid::Uuid::new_v4(); let fake_id = uuid::Uuid::new_v4();
@ -423,9 +415,8 @@ async fn test_not_found_errors() {
#[tokio::test] #[tokio::test]
async fn test_batch_get_completed_by_drv_paths() { async fn test_batch_get_completed_by_drv_paths() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "batch-drv").await; let project = create_test_project(&pool, "batch-drv").await;
@ -493,9 +484,8 @@ async fn test_batch_get_completed_by_drv_paths() {
#[tokio::test] #[tokio::test]
async fn test_batch_check_deps_for_builds() { async fn test_batch_check_deps_for_builds() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "batch-deps").await; let project = create_test_project(&pool, "batch-deps").await;
@ -568,9 +558,8 @@ async fn test_batch_check_deps_for_builds() {
#[tokio::test] #[tokio::test]
async fn test_list_filtered_with_system_filter() { async fn test_list_filtered_with_system_filter() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "filter-sys").await; let project = create_test_project(&pool, "filter-sys").await;
@ -641,9 +630,8 @@ async fn test_list_filtered_with_system_filter() {
#[tokio::test] #[tokio::test]
async fn test_list_filtered_with_job_name_filter() { async fn test_list_filtered_with_job_name_filter() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "filter-job").await; let project = create_test_project(&pool, "filter-job").await;
@ -705,9 +693,8 @@ async fn test_list_filtered_with_job_name_filter() {
#[tokio::test] #[tokio::test]
async fn test_reset_orphaned_batch_limit() { async fn test_reset_orphaned_batch_limit() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "orphan").await; let project = create_test_project(&pool, "orphan").await;
@ -747,9 +734,8 @@ async fn test_reset_orphaned_batch_limit() {
#[tokio::test] #[tokio::test]
async fn test_build_cancel_cascade() { async fn test_build_cancel_cascade() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "cancel-cascade").await; let project = create_test_project(&pool, "cancel-cascade").await;
@ -786,9 +772,8 @@ async fn test_build_cancel_cascade() {
#[tokio::test] #[tokio::test]
async fn test_dedup_by_drv_path() { async fn test_dedup_by_drv_path() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let project = create_test_project(&pool, "dedup").await; let project = create_test_project(&pool, "dedup").await;

View file

@ -1,16 +1,13 @@
//! Integration tests for advanced search functionality //! Integration tests for advanced search functionality
//! Requires TEST_DATABASE_URL to be set to a PostgreSQL connection string. //! Requires `TEST_DATABASE_URL` to be set to a `PostgreSQL` connection string.
use fc_common::{BuildStatus, models::*, repo, repo::search::*}; use fc_common::{BuildStatus, models::*, repo, repo::search::*};
use uuid::Uuid; use uuid::Uuid;
async fn get_pool() -> Option<sqlx::PgPool> { async fn get_pool() -> Option<sqlx::PgPool> {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping search test: TEST_DATABASE_URL not set"); println!("Skipping search test: TEST_DATABASE_URL not set");
return None; return None;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -27,9 +24,8 @@ async fn get_pool() -> Option<sqlx::PgPool> {
#[tokio::test] #[tokio::test]
async fn test_project_search() { async fn test_project_search() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create test projects // Create test projects
@ -93,9 +89,8 @@ async fn test_project_search() {
#[tokio::test] #[tokio::test]
async fn test_build_search_with_filters() { async fn test_build_search_with_filters() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Setup: project -> jobset -> evaluation -> builds // Setup: project -> jobset -> evaluation -> builds
@ -209,7 +204,7 @@ async fn test_build_search_with_filters() {
// Search with status filter (succeeded) // Search with status filter (succeeded)
let params = SearchParams { let params = SearchParams {
query: "".to_string(), query: String::new(),
entities: vec![SearchEntity::Builds], entities: vec![SearchEntity::Builds],
limit: 10, limit: 10,
offset: 0, offset: 0,
@ -240,9 +235,8 @@ async fn test_build_search_with_filters() {
#[tokio::test] #[tokio::test]
async fn test_multi_entity_search() { async fn test_multi_entity_search() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create project with jobset, evaluation, and build // Create project with jobset, evaluation, and build
@ -324,9 +318,8 @@ async fn test_multi_entity_search() {
#[tokio::test] #[tokio::test]
async fn test_search_pagination() { async fn test_search_pagination() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create multiple projects // Create multiple projects
@ -334,7 +327,7 @@ async fn test_search_pagination() {
for i in 0..5 { for i in 0..5 {
let project = repo::projects::create(&pool, CreateProject { let project = repo::projects::create(&pool, CreateProject {
name: format!("page-test-{}-{}", i, Uuid::new_v4().simple()), name: format!("page-test-{}-{}", i, Uuid::new_v4().simple()),
description: Some(format!("Page test project {}", i)), description: Some(format!("Page test project {i}")),
repository_url: "https://github.com/test/page".to_string(), repository_url: "https://github.com/test/page".to_string(),
}) })
.await .await
@ -385,9 +378,8 @@ async fn test_search_pagination() {
#[tokio::test] #[tokio::test]
async fn test_search_sorting() { async fn test_search_sorting() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create projects in reverse alphabetical order // Create projects in reverse alphabetical order
@ -433,14 +425,13 @@ async fn test_search_sorting() {
#[tokio::test] #[tokio::test]
async fn test_empty_search() { async fn test_empty_search() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Empty query should return all entities (up to limit) // Empty query should return all entities (up to limit)
let params = SearchParams { let params = SearchParams {
query: "".to_string(), query: String::new(),
entities: vec![SearchEntity::Projects], entities: vec![SearchEntity::Projects],
limit: 10, limit: 10,
offset: 0, offset: 0,
@ -459,9 +450,8 @@ async fn test_empty_search() {
#[tokio::test] #[tokio::test]
async fn test_quick_search() { async fn test_quick_search() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create test data: project -> jobset -> evaluation -> build // Create test data: project -> jobset -> evaluation -> build

View file

@ -1,17 +1,14 @@
//! Integration tests for user management - CRUD, authentication, and //! Integration tests for user management - CRUD, authentication, and
//! relationships. Requires TEST_DATABASE_URL to be set to a PostgreSQL //! relationships. Requires `TEST_DATABASE_URL` to be set to a `PostgreSQL`
//! connection string. //! connection string.
use fc_common::{models::*, repo}; use fc_common::{models::*, repo};
use uuid::Uuid; use uuid::Uuid;
async fn get_pool() -> Option<sqlx::PgPool> { async fn get_pool() -> Option<sqlx::PgPool> {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping repo test: TEST_DATABASE_URL not set"); println!("Skipping repo test: TEST_DATABASE_URL not set");
return None; return None;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -28,13 +25,12 @@ async fn get_pool() -> Option<sqlx::PgPool> {
#[tokio::test] #[tokio::test]
async fn test_user_crud() { async fn test_user_crud() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let username = format!("test-user-{}", Uuid::new_v4().simple()); let username = format!("test-user-{}", Uuid::new_v4().simple());
let email = format!("{}@example.com", username); let email = format!("{username}@example.com");
// Create user // Create user
let user = repo::users::create(&pool, &CreateUser { let user = repo::users::create(&pool, &CreateUser {
@ -82,7 +78,7 @@ async fn test_user_crud() {
assert!(count > 0); assert!(count > 0);
// Update email // Update email
let new_email = format!("updated-{}", email); let new_email = format!("updated-{email}");
let updated = repo::users::update_email(&pool, user.id, &new_email) let updated = repo::users::update_email(&pool, user.id, &new_email)
.await .await
.expect("update email"); .expect("update email");
@ -135,9 +131,8 @@ async fn test_user_crud() {
#[tokio::test] #[tokio::test]
async fn test_user_authentication() { async fn test_user_authentication() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let username = format!("auth-test-{}", Uuid::new_v4().simple()); let username = format!("auth-test-{}", Uuid::new_v4().simple());
@ -146,7 +141,7 @@ async fn test_user_authentication() {
// Create user // Create user
let user = repo::users::create(&pool, &CreateUser { let user = repo::users::create(&pool, &CreateUser {
username: username.clone(), username: username.clone(),
email: format!("{}@example.com", username), email: format!("{username}@example.com"),
full_name: None, full_name: None,
password: password.to_string(), password: password.to_string(),
role: None, role: None,
@ -234,13 +229,12 @@ async fn test_password_hashing() {
#[tokio::test] #[tokio::test]
async fn test_user_unique_constraints() { async fn test_user_unique_constraints() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let username = format!("unique-{}", Uuid::new_v4().simple()); let username = format!("unique-{}", Uuid::new_v4().simple());
let email = format!("{}@example.com", username); let email = format!("{username}@example.com");
// Create first user // Create first user
let _ = repo::users::create(&pool, &CreateUser { let _ = repo::users::create(&pool, &CreateUser {
@ -256,7 +250,7 @@ async fn test_user_unique_constraints() {
// Try to create with same username // Try to create with same username
let result = repo::users::create(&pool, &CreateUser { let result = repo::users::create(&pool, &CreateUser {
username: username.clone(), username: username.clone(),
email: format!("other-{}", email), email: format!("other-{email}"),
full_name: None, full_name: None,
password: "password".to_string(), password: "password".to_string(),
role: None, role: None,
@ -266,7 +260,7 @@ async fn test_user_unique_constraints() {
// Try to create with same email // Try to create with same email
let result = repo::users::create(&pool, &CreateUser { let result = repo::users::create(&pool, &CreateUser {
username: format!("other-{}", username), username: format!("other-{username}"),
email: email.clone(), email: email.clone(),
full_name: None, full_name: None,
password: "password".to_string(), password: "password".to_string(),
@ -285,13 +279,12 @@ async fn test_user_unique_constraints() {
#[tokio::test] #[tokio::test]
async fn test_oauth_user_creation() { async fn test_oauth_user_creation() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let username = format!("oauth-user-{}", Uuid::new_v4().simple()); let username = format!("oauth-user-{}", Uuid::new_v4().simple());
let email = format!("{}@github.com", username); let email = format!("{username}@github.com");
let oauth_provider_id = format!("github_{}", Uuid::new_v4().simple()); let oauth_provider_id = format!("github_{}", Uuid::new_v4().simple());
// Create OAuth user // Create OAuth user
@ -330,9 +323,8 @@ async fn test_oauth_user_creation() {
#[tokio::test] #[tokio::test]
async fn test_starred_jobs_crud() { async fn test_starred_jobs_crud() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Create prerequisite data // Create prerequisite data
@ -442,9 +434,8 @@ async fn test_starred_jobs_crud() {
#[tokio::test] #[tokio::test]
async fn test_starred_jobs_delete_by_job() { async fn test_starred_jobs_delete_by_job() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Setup // Setup
@ -516,9 +507,8 @@ async fn test_starred_jobs_delete_by_job() {
#[tokio::test] #[tokio::test]
async fn test_project_members_crud() { async fn test_project_members_crud() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Setup // Setup
@ -615,9 +605,8 @@ async fn test_project_members_crud() {
#[tokio::test] #[tokio::test]
async fn test_project_members_permissions() { async fn test_project_members_permissions() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// Setup // Setup
@ -809,9 +798,8 @@ async fn test_project_members_permissions() {
#[tokio::test] #[tokio::test]
async fn test_user_not_found_errors() { async fn test_user_not_found_errors() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let fake_id = Uuid::new_v4(); let fake_id = Uuid::new_v4();

View file

@ -20,6 +20,11 @@ use tokio::sync::Notify;
use tracing::info; use tracing::info;
use uuid::Uuid; use uuid::Uuid;
/// Main evaluator loop. Polls jobsets and runs nix evaluations.
///
/// # Errors
///
/// Returns error if evaluation cycle fails and `strict_errors` is enabled.
pub async fn run( pub async fn run(
pool: PgPool, pool: PgPool,
config: EvaluatorConfig, config: EvaluatorConfig,
@ -57,13 +62,10 @@ async fn run_cycle(
let ready: Vec<_> = active let ready: Vec<_> = active
.into_iter() .into_iter()
.filter(|js| { .filter(|js| {
match js.last_checked_at { js.last_checked_at.is_none_or(|last| {
Some(last) => {
let elapsed = (now - last).num_seconds(); let elapsed = (now - last).num_seconds();
elapsed >= i64::from(js.check_interval) elapsed >= i64::from(js.check_interval)
}, })
None => true, // Never checked, evaluate now
}
}) })
.collect(); .collect();
@ -91,11 +93,10 @@ async fn run_cycle(
|| msg.contains("sqlite") || msg.contains("sqlite")
{ {
tracing::error!( tracing::error!(
"DISK SPACE ISSUE DETECTED: Evaluation failed due to disk space \ "Evaluation failed due to disk space problems. Please free up \
problems. Please free up space on the server:\n- Run \ space on the server:\n- Run `nix-collect-garbage -d` to clean \
`nix-collect-garbage -d` to clean the Nix store\n- Clear \ the Nix store\n- Clear /tmp/fc-evaluator directory\n- Check \
/tmp/fc-evaluator directory\n- Check build logs directory if \ build logs directory if configured"
configured"
); );
} }
} }
@ -129,13 +130,13 @@ async fn evaluate_jobset(
if info.is_critical() { if info.is_critical() {
tracing::error!( tracing::error!(
jobset = %jobset.name, jobset = %jobset.name,
"CRITICAL: Less than 1GB disk space available. {}", "Less than 1GB disk space available. {}",
info.summary() info.summary()
); );
} else if info.is_low() { } else if info.is_low() {
tracing::warn!( tracing::warn!(
jobset = %jobset.name, jobset = %jobset.name,
"LOW: Less than 5GB disk space available. {}", "Less than 5GB disk space available. {}",
info.summary() info.summary()
); );
} }
@ -268,15 +269,22 @@ async fn evaluate_jobset(
evaluation jobset={} commit={}", evaluation jobset={} commit={}",
build_count, jobset.name, commit_hash build_count, jobset.name, commit_hash
); );
if let Err(e) =
repo::jobsets::update_last_checked(pool, jobset.id).await
{
tracing::warn!(
jobset = %jobset.name,
"Failed to update last_checked_at: {e}"
);
}
return Ok(()); return Ok(());
} else { }
info!( info!(
"Evaluation completed but has 0 builds, re-running nix evaluation \ "Evaluation completed but has 0 builds, re-running nix evaluation \
jobset={} commit={}", jobset={} commit={}",
jobset.name, commit_hash jobset.name, commit_hash
); );
} }
}
existing existing
}, },
Err(e) => { Err(e) => {
@ -412,23 +420,20 @@ async fn create_builds_from_eval(
for dep_drv in input_drvs.keys() { for dep_drv in input_drvs.keys() {
if let Some(&dep_build_id) = drv_to_build.get(dep_drv) if let Some(&dep_build_id) = drv_to_build.get(dep_drv)
&& dep_build_id != build_id && dep_build_id != build_id
{ && let Err(e) =
if let Err(e) =
repo::build_dependencies::create(pool, build_id, dep_build_id).await repo::build_dependencies::create(pool, build_id, dep_build_id).await
{ {
tracing::warn!(build_id = %build_id, dep = %dep_build_id, "Failed to create build dependency: {e}"); tracing::warn!(build_id = %build_id, dep = %dep_build_id, "Failed to create build dependency: {e}");
} }
} }
} }
}
// Aggregate constituent dependencies // Aggregate constituent dependencies
if let Some(ref constituents) = job.constituents { if let Some(ref constituents) = job.constituents {
for constituent_name in constituents { for constituent_name in constituents {
if let Some(&dep_build_id) = name_to_build.get(constituent_name) if let Some(&dep_build_id) = name_to_build.get(constituent_name)
&& dep_build_id != build_id && dep_build_id != build_id
{ && let Err(e) =
if let Err(e) =
repo::build_dependencies::create(pool, build_id, dep_build_id).await repo::build_dependencies::create(pool, build_id, dep_build_id).await
{ {
tracing::warn!(build_id = %build_id, dep = %dep_build_id, "Failed to create constituent dependency: {e}"); tracing::warn!(build_id = %build_id, dep = %dep_build_id, "Failed to create constituent dependency: {e}");
@ -436,13 +441,12 @@ async fn create_builds_from_eval(
} }
} }
} }
}
Ok(()) Ok(())
} }
/// Compute a deterministic hash over the commit and all jobset inputs. /// Compute a deterministic hash over the commit and all jobset inputs.
/// Used for evaluation caching skip re-eval when inputs haven't changed. /// Used for evaluation caching, so skip re-eval when inputs haven't changed.
fn compute_inputs_hash(commit_hash: &str, inputs: &[JobsetInput]) -> String { fn compute_inputs_hash(commit_hash: &str, inputs: &[JobsetInput]) -> String {
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
@ -472,6 +476,20 @@ async fn check_declarative_config(
repo_path: &std::path::Path, repo_path: &std::path::Path,
project_id: Uuid, project_id: Uuid,
) { ) {
#[derive(serde::Deserialize)]
struct DeclarativeConfig {
jobsets: Option<Vec<DeclarativeJobset>>,
}
#[derive(serde::Deserialize)]
struct DeclarativeJobset {
name: String,
nix_expression: String,
flake_mode: Option<bool>,
check_interval: Option<i32>,
enabled: Option<bool>,
}
let config_path = repo_path.join(".fc.toml"); let config_path = repo_path.join(".fc.toml");
let alt_config_path = repo_path.join(".fc/config.toml"); let alt_config_path = repo_path.join(".fc/config.toml");
@ -494,20 +512,6 @@ async fn check_declarative_config(
}, },
}; };
#[derive(serde::Deserialize)]
struct DeclarativeConfig {
jobsets: Option<Vec<DeclarativeJobset>>,
}
#[derive(serde::Deserialize)]
struct DeclarativeJobset {
name: String,
nix_expression: String,
flake_mode: Option<bool>,
check_interval: Option<i32>,
enabled: Option<bool>,
}
let config: DeclarativeConfig = match toml::from_str(&content) { let config: DeclarativeConfig = match toml::from_str(&content) {
Ok(c) => c, Ok(c) => c,
Err(e) => { Err(e) => {

View file

@ -7,6 +7,10 @@ use git2::Repository;
/// ///
/// If `branch` is `Some`, resolve `refs/remotes/origin/<branch>` instead of /// If `branch` is `Some`, resolve `refs/remotes/origin/<branch>` instead of
/// HEAD. /// HEAD.
///
/// # Errors
///
/// Returns error if git operations fail.
#[tracing::instrument(skip(work_dir))] #[tracing::instrument(skip(work_dir))]
pub fn clone_or_fetch( pub fn clone_or_fetch(
url: &str, url: &str,
@ -20,7 +24,7 @@ pub fn clone_or_fetch(
let repo = if is_fetch { let repo = if is_fetch {
let repo = Repository::open(&repo_path)?; let repo = Repository::open(&repo_path)?;
// Fetch origin — scope the borrow so `remote` is dropped before we move // Fetch origin. Scope the borrow so `remote` is dropped before we move
// `repo` // `repo`
{ {
let mut remote = repo.find_remote("origin")?; let mut remote = repo.find_remote("origin")?;
@ -34,12 +38,11 @@ pub fn clone_or_fetch(
// Resolve commit from remote refs (which are always up-to-date after fetch). // Resolve commit from remote refs (which are always up-to-date after fetch).
// When no branch is specified, detect the default branch from local HEAD's // When no branch is specified, detect the default branch from local HEAD's
// tracking target. // tracking target.
let branch_name = match branch { let branch_name = if let Some(b) = branch {
Some(b) => b.to_string(), b.to_string()
None => { } else {
let head = repo.head()?; let head = repo.head()?;
head.shorthand().unwrap_or("master").to_string() head.shorthand().unwrap_or("master").to_string()
},
}; };
let remote_ref = format!("refs/remotes/origin/{branch_name}"); let remote_ref = format!("refs/remotes/origin/{branch_name}");

View file

@ -105,6 +105,10 @@ pub fn parse_eval_output(stdout: &str) -> EvalResult {
/// Evaluate nix expressions and return discovered jobs. /// Evaluate nix expressions and return discovered jobs.
/// If `flake_mode` is true, uses nix-eval-jobs with --flake flag. /// If `flake_mode` is true, uses nix-eval-jobs with --flake flag.
/// If `flake_mode` is false, evaluates a legacy expression file. /// If `flake_mode` is false, evaluates a legacy expression file.
///
/// # Errors
///
/// Returns error if nix evaluation command fails or times out.
#[tracing::instrument(skip(config, inputs), fields(flake_mode, nix_expression))] #[tracing::instrument(skip(config, inputs), fields(flake_mode, nix_expression))]
pub async fn evaluate( pub async fn evaluate(
repo_path: &Path, repo_path: &Path,

View file

@ -1,5 +1,5 @@
//! Tests for the git clone/fetch module. //! Tests for the git clone/fetch module.
//! Uses git2 to create a temporary repository, then exercises clone_or_fetch. //! Uses git2 to create a temporary repository, then exercises `clone_or_fetch`.
use git2::{Repository, Signature}; use git2::{Repository, Signature};
use tempfile::TempDir; use tempfile::TempDir;

View file

@ -10,6 +10,11 @@ const MAX_LOG_SIZE: usize = 100 * 1024 * 1024; // 100MB
skip(work_dir, live_log_path), skip(work_dir, live_log_path),
fields(drv_path, store_uri) fields(drv_path, store_uri)
)] )]
/// Run a nix build on a remote builder via SSH.
///
/// # Errors
///
/// Returns error if nix build command fails or times out.
pub async fn run_nix_build_remote( pub async fn run_nix_build_remote(
drv_path: &str, drv_path: &str,
work_dir: &Path, work_dir: &Path,
@ -120,14 +125,11 @@ pub async fn run_nix_build_remote(
}) })
.await; .await;
match result { result.unwrap_or_else(|_| {
Ok(inner) => inner,
Err(_) => {
Err(CiError::Timeout(format!( Err(CiError::Timeout(format!(
"Remote build timed out after {timeout:?}" "Remote build timed out after {timeout:?}"
))) )))
}, })
}
} }
pub struct BuildResult { pub struct BuildResult {
@ -165,6 +167,10 @@ pub fn parse_nix_log_line(line: &str) -> Option<(&'static str, String)> {
/// Run `nix build` for a derivation path. /// Run `nix build` for a derivation path.
/// If `live_log_path` is provided, build output is streamed to that file /// If `live_log_path` is provided, build output is streamed to that file
/// incrementally. /// incrementally.
///
/// # Errors
///
/// Returns error if nix build command fails or times out.
#[tracing::instrument(skip(work_dir, live_log_path), fields(drv_path))] #[tracing::instrument(skip(work_dir, live_log_path), fields(drv_path))]
pub async fn run_nix_build( pub async fn run_nix_build(
drv_path: &str, drv_path: &str,
@ -299,12 +305,9 @@ pub async fn run_nix_build(
}) })
.await; .await;
match result { result.unwrap_or_else(|_| {
Ok(inner) => inner,
Err(_) => {
Err(CiError::Timeout(format!( Err(CiError::Timeout(format!(
"Build timed out after {timeout:?}" "Build timed out after {timeout:?}"
))) )))
}, })
}
} }

View file

@ -90,6 +90,7 @@ async fn main() -> anyhow::Result<()> {
() = gc_loop(gc_config_for_loop, db.pool().clone()) => {} () = gc_loop(gc_config_for_loop, db.pool().clone()) => {}
() = failed_paths_cleanup_loop(db.pool().clone(), failed_paths_ttl, failed_paths_cache) => {} () = failed_paths_cleanup_loop(db.pool().clone(), failed_paths_ttl, failed_paths_cache) => {}
() = cancel_checker_loop(db.pool().clone(), active_builds) => {} () = cancel_checker_loop(db.pool().clone(), active_builds) => {}
() = notification_retry_loop(db.pool().clone(), notifications_config.clone()) => {}
() = shutdown_signal() => { () = shutdown_signal() => {
tracing::info!("Shutdown signal received, draining in-flight builds..."); tracing::info!("Shutdown signal received, draining in-flight builds...");
worker_pool_for_drain.drain(); worker_pool_for_drain.drain();
@ -174,7 +175,7 @@ async fn failed_paths_cleanup_loop(
return std::future::pending().await; return std::future::pending().await;
} }
let interval = std::time::Duration::from_secs(3600); let interval = std::time::Duration::from_hours(1);
loop { loop {
tokio::time::sleep(interval).await; tokio::time::sleep(interval).await;
match fc_common::repo::failed_paths_cache::cleanup_expired(&pool, ttl).await match fc_common::repo::failed_paths_cache::cleanup_expired(&pool, ttl).await
@ -218,6 +219,103 @@ async fn cancel_checker_loop(pool: sqlx::PgPool, active_builds: ActiveBuilds) {
} }
} }
async fn notification_retry_loop(
pool: sqlx::PgPool,
config: fc_common::config::NotificationsConfig,
) {
if !config.enable_retry_queue {
return std::future::pending().await;
}
let poll_interval =
std::time::Duration::from_secs(config.retry_poll_interval);
let retention_days = config.retention_days;
let cleanup_pool = pool.clone();
tokio::spawn(async move {
let cleanup_interval = std::time::Duration::from_hours(1);
loop {
tokio::time::sleep(cleanup_interval).await;
match repo::notification_tasks::cleanup_old_tasks(
&cleanup_pool,
retention_days,
)
.await
{
Ok(count) if count > 0 => {
tracing::info!(count, "Cleaned up old notification tasks");
},
Ok(_) => {},
Err(e) => {
tracing::error!("Notification task cleanup failed: {e}");
},
}
}
});
loop {
tokio::time::sleep(poll_interval).await;
let tasks = match repo::notification_tasks::list_pending(&pool, 10).await {
Ok(t) => t,
Err(e) => {
tracing::warn!("Failed to fetch pending notification tasks: {e}");
continue;
},
};
for task in tasks {
if let Err(e) =
repo::notification_tasks::mark_running(&pool, task.id).await
{
tracing::warn!(task_id = %task.id, "Failed to mark task as running: {e}");
continue;
}
match fc_common::notifications::process_notification_task(&task).await {
Ok(()) => {
if let Err(e) =
repo::notification_tasks::mark_completed(&pool, task.id).await
{
tracing::error!(task_id = %task.id, "Failed to mark task as completed: {e}");
} else {
tracing::info!(
task_id = %task.id,
notification_type = %task.notification_type,
attempts = task.attempts + 1,
"Notification task completed"
);
}
},
Err(err) => {
if let Err(e) = repo::notification_tasks::mark_failed_and_retry(
&pool, task.id, &err,
)
.await
{
tracing::error!(task_id = %task.id, "Failed to update task status: {e}");
} else {
let status_after = if task.attempts + 1 >= task.max_attempts {
"failed permanently"
} else {
"scheduled for retry"
};
tracing::warn!(
task_id = %task.id,
notification_type = %task.notification_type,
attempts = task.attempts + 1,
max_attempts = task.max_attempts,
error = %err,
status = status_after,
"Notification task failed"
);
}
},
}
}
}
}
async fn shutdown_signal() { async fn shutdown_signal() {
let ctrl_c = async { let ctrl_c = async {
tokio::signal::ctrl_c() tokio::signal::ctrl_c()

View file

@ -9,6 +9,12 @@ use tokio::sync::Notify;
use crate::worker::WorkerPool; use crate::worker::WorkerPool;
/// Main queue runner loop. Polls for pending builds and dispatches them to
/// workers.
///
/// # Errors
///
/// Returns error if database operations fail and `strict_errors` is enabled.
pub async fn run( pub async fn run(
pool: PgPool, pool: PgPool,
worker_pool: Arc<WorkerPool>, worker_pool: Arc<WorkerPool>,
@ -42,7 +48,7 @@ pub async fn run(
.await .await
{ {
Ok(true) => { Ok(true) => {
// All constituents done mark aggregate as completed // All constituents done, mark aggregate as completed
tracing::info!( tracing::info!(
build_id = %build.id, build_id = %build.id,
job = %build.job_name, job = %build.job_name,
@ -115,12 +121,15 @@ pub async fn run(
} }
// Failed paths cache: skip known-failing derivations // Failed paths cache: skip known-failing derivations
if failed_paths_cache { if failed_paths_cache
if let Ok(true) = repo::failed_paths_cache::is_cached_failure( && matches!(
repo::failed_paths_cache::is_cached_failure(
&pool, &pool,
&build.drv_path, &build.drv_path,
) )
.await .await,
Ok(true)
)
{ {
tracing::info!( tracing::info!(
build_id = %build.id, drv = %build.drv_path, build_id = %build.id, drv = %build.drv_path,
@ -143,7 +152,6 @@ pub async fn run(
} }
continue; continue;
} }
}
// Dependency-aware scheduling: skip if deps not met // Dependency-aware scheduling: skip if deps not met
match repo::build_dependencies::all_deps_completed(&pool, build.id) match repo::build_dependencies::all_deps_completed(&pool, build.id)

View file

@ -102,11 +102,13 @@ impl WorkerPool {
.await; .await;
} }
pub fn worker_count(&self) -> usize { #[must_use]
pub const fn worker_count(&self) -> usize {
self.worker_count self.worker_count
} }
pub fn active_builds(&self) -> &ActiveBuilds { #[must_use]
pub const fn active_builds(&self) -> &ActiveBuilds {
&self.active_builds &self.active_builds
} }
@ -135,9 +137,8 @@ impl WorkerPool {
tokio::spawn(async move { tokio::spawn(async move {
let result = async { let result = async {
let _permit = match semaphore.acquire().await { let Ok(_permit) = semaphore.acquire().await else {
Ok(p) => p, return;
Err(_) => return,
}; };
if let Err(e) = run_build( if let Err(e) = run_build(
@ -287,7 +288,7 @@ async fn push_to_cache(
/// Build S3 store URI with configuration options. /// Build S3 store URI with configuration options.
/// Nix S3 URIs support query parameters for configuration: /// Nix S3 URIs support query parameters for configuration:
/// s3://bucket?region=us-east-1&endpoint=https://minio.example.com /// <s3://bucket?region=us-east-1&endpoint=https://minio.example.com>
fn build_s3_store_uri( fn build_s3_store_uri(
base_uri: &str, base_uri: &str,
config: Option<&fc_common::config::S3CacheConfig>, config: Option<&fc_common::config::S3CacheConfig>,
@ -325,66 +326,6 @@ fn build_s3_store_uri(
format!("{base_uri}?{query}") format!("{base_uri}?{query}")
} }
#[cfg(test)]
mod tests {
use fc_common::config::S3CacheConfig;
use super::*;
#[test]
fn test_build_s3_store_uri_no_config() {
let result = build_s3_store_uri("s3://my-bucket", None);
assert_eq!(result, "s3://my-bucket");
}
#[test]
fn test_build_s3_store_uri_empty_config() {
let cfg = S3CacheConfig::default();
let result = build_s3_store_uri("s3://my-bucket", Some(&cfg));
assert_eq!(result, "s3://my-bucket");
}
#[test]
fn test_build_s3_store_uri_with_region() {
let cfg = S3CacheConfig {
region: Some("us-east-1".to_string()),
..Default::default()
};
let result = build_s3_store_uri("s3://my-bucket", Some(&cfg));
assert_eq!(result, "s3://my-bucket?region=us-east-1");
}
#[test]
fn test_build_s3_store_uri_with_endpoint_and_path_style() {
let cfg = S3CacheConfig {
endpoint_url: Some("https://minio.example.com".to_string()),
use_path_style: true,
..Default::default()
};
let result = build_s3_store_uri("s3://my-bucket", Some(&cfg));
assert!(result.starts_with("s3://my-bucket?"));
assert!(result.contains("endpoint=https%3A%2F%2Fminio.example.com"));
assert!(result.contains("use-path-style=true"));
}
#[test]
fn test_build_s3_store_uri_all_params() {
let cfg = S3CacheConfig {
region: Some("eu-west-1".to_string()),
endpoint_url: Some("https://s3.example.com".to_string()),
use_path_style: true,
..Default::default()
};
let result = build_s3_store_uri("s3://cache-bucket", Some(&cfg));
assert!(result.starts_with("s3://cache-bucket?"));
assert!(result.contains("region=eu-west-1"));
assert!(result.contains("endpoint=https%3A%2F%2Fs3.example.com"));
assert!(result.contains("use-path-style=true"));
// Verify params are joined with &
assert_eq!(result.matches('&').count(), 2);
}
}
/// Try to run the build on a remote builder if one is available for the build's /// Try to run the build on a remote builder if one is available for the build's
/// system. /// system.
async fn try_remote_build( async fn try_remote_build(
@ -478,7 +419,7 @@ async fn collect_metrics_and_alert(
} }
} }
for path in output_paths.iter() { for path in output_paths {
if let Ok(meta) = tokio::fs::metadata(path).await { if let Ok(meta) = tokio::fs::metadata(path).await {
let size = meta.len(); let size = meta.len();
if let Err(e) = repo::build_metrics::upsert( if let Err(e) = repo::build_metrics::upsert(
@ -497,23 +438,20 @@ async fn collect_metrics_and_alert(
} }
} }
let manager = match alert_manager { let Some(manager) = alert_manager else {
Some(m) => m, return;
None => return,
}; };
if manager.is_enabled() { if manager.is_enabled()
if let Ok(evaluation) = && let Ok(evaluation) =
repo::evaluations::get(pool, build.evaluation_id).await repo::evaluations::get(pool, build.evaluation_id).await
&& let Ok(jobset) = repo::jobsets::get(pool, evaluation.jobset_id).await
{ {
if let Ok(jobset) = repo::jobsets::get(pool, evaluation.jobset_id).await {
manager manager
.check_and_alert(pool, Some(jobset.project_id), Some(jobset.id)) .check_and_alert(pool, Some(jobset.project_id), Some(jobset.id))
.await; .await;
} }
} }
}
}
#[tracing::instrument(skip(pool, build, work_dir, log_config, gc_config, notifications_config, signing_config, cache_upload_config), fields(build_id = %build.id, job = %build.job_name))] #[tracing::instrument(skip(pool, build, work_dir, log_config, gc_config, notifications_config, signing_config, cache_upload_config), fields(build_id = %build.id, job = %build.job_name))]
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -561,7 +499,7 @@ async fn run_build(
{ {
Some(r) => Ok(r), Some(r) => Ok(r),
None => { None => {
// No remote builder available or all failed build locally // No remote builder available or all failed, build locally
crate::builder::run_nix_build( crate::builder::run_nix_build(
&build.drv_path, &build.drv_path,
work_dir, work_dir,
@ -705,11 +643,11 @@ async fn run_build(
} }
// Sign outputs at build time // Sign outputs at build time
if sign_outputs(&build_result.output_paths, signing_config).await { if sign_outputs(&build_result.output_paths, signing_config).await
if let Err(e) = repo::builds::mark_signed(pool, build.id).await { && let Err(e) = repo::builds::mark_signed(pool, build.id).await
{
tracing::warn!(build_id = %build.id, "Failed to mark build as signed: {e}"); tracing::warn!(build_id = %build.id, "Failed to mark build as signed: {e}");
} }
}
// Push to external binary cache if configured // Push to external binary cache if configured
if cache_upload_config.enabled if cache_upload_config.enabled
@ -740,9 +678,9 @@ async fn run_build(
collect_metrics_and_alert( collect_metrics_and_alert(
pool, pool,
&build, build,
&build_result.output_paths, &build_result.output_paths,
&alert_manager, alert_manager,
) )
.await; .await;
@ -775,8 +713,7 @@ async fn run_build(
let failure_status = build_result let failure_status = build_result
.exit_code .exit_code
.map(BuildStatus::from_exit_code) .map_or(BuildStatus::Failed, BuildStatus::from_exit_code);
.unwrap_or(BuildStatus::Failed);
repo::builds::complete( repo::builds::complete(
pool, pool,
build.id, build.id,
@ -805,11 +742,11 @@ async fn run_build(
let msg = e.to_string(); let msg = e.to_string();
// Write error log // Write error log
if let Some(ref storage) = log_storage { if let Some(ref storage) = log_storage
if let Err(e) = storage.write_log(&build.id, "", &msg) { && let Err(e) = storage.write_log(&build.id, "", &msg)
{
tracing::warn!(build_id = %build.id, "Failed to write error log: {e}"); tracing::warn!(build_id = %build.id, "Failed to write error log: {e}");
} }
}
// Clean up live log // Clean up live log
let _ = tokio::fs::remove_file(&live_log_path).await; let _ = tokio::fs::remove_file(&live_log_path).await;
@ -834,6 +771,7 @@ async fn run_build(
get_project_for_build(pool, build).await get_project_for_build(pool, build).await
{ {
fc_common::notifications::dispatch_build_finished( fc_common::notifications::dispatch_build_finished(
Some(pool),
&updated_build, &updated_build,
&project, &project,
&commit_hash, &commit_hash,
@ -845,15 +783,73 @@ async fn run_build(
// Auto-promote channels if all builds in the evaluation are done // Auto-promote channels if all builds in the evaluation are done
if updated_build.status.is_success() if updated_build.status.is_success()
&& let Ok(eval) = repo::evaluations::get(pool, build.evaluation_id).await && let Ok(eval) = repo::evaluations::get(pool, build.evaluation_id).await
{ && let Err(e) =
if let Err(e) =
repo::channels::auto_promote_if_complete(pool, eval.jobset_id, eval.id) repo::channels::auto_promote_if_complete(pool, eval.jobset_id, eval.id)
.await .await
{ {
tracing::warn!(build_id = %build.id, "Failed to auto-promote channels: {e}"); tracing::warn!(build_id = %build.id, "Failed to auto-promote channels: {e}");
} }
} }
}
Ok(()) Ok(())
} }
#[cfg(test)]
mod tests {
use fc_common::config::S3CacheConfig;
use super::*;
#[test]
fn test_build_s3_store_uri_no_config() {
let result = build_s3_store_uri("s3://my-bucket", None);
assert_eq!(result, "s3://my-bucket");
}
#[test]
fn test_build_s3_store_uri_empty_config() {
let cfg = S3CacheConfig::default();
let result = build_s3_store_uri("s3://my-bucket", Some(&cfg));
assert_eq!(result, "s3://my-bucket");
}
#[test]
fn test_build_s3_store_uri_with_region() {
let cfg = S3CacheConfig {
region: Some("us-east-1".to_string()),
..Default::default()
};
let result = build_s3_store_uri("s3://my-bucket", Some(&cfg));
assert_eq!(result, "s3://my-bucket?region=us-east-1");
}
#[test]
fn test_build_s3_store_uri_with_endpoint_and_path_style() {
let cfg = S3CacheConfig {
endpoint_url: Some("https://minio.example.com".to_string()),
use_path_style: true,
..Default::default()
};
let result = build_s3_store_uri("s3://my-bucket", Some(&cfg));
assert!(result.starts_with("s3://my-bucket?"));
assert!(result.contains("endpoint=https%3A%2F%2Fminio.example.com"));
assert!(result.contains("use-path-style=true"));
}
#[test]
fn test_build_s3_store_uri_all_params() {
let cfg = S3CacheConfig {
region: Some("eu-west-1".to_string()),
endpoint_url: Some("https://s3.example.com".to_string()),
use_path_style: true,
..Default::default()
};
let result = build_s3_store_uri("s3://cache-bucket", Some(&cfg));
assert!(result.starts_with("s3://cache-bucket?"));
assert!(result.contains("region=eu-west-1"));
assert!(result.contains("endpoint=https%3A%2F%2Fs3.example.com"));
assert!(result.contains("use-path-style=true"));
// Verify params are joined with &
assert_eq!(result.matches('&').count(), 2);
}
}

View file

@ -1,6 +1,6 @@
//! Tests for the queue runner. //! Tests for the queue runner.
//! Nix log parsing tests require no external binaries. //! Nix log parsing tests require no external binaries.
//! Database tests require TEST_DATABASE_URL. //! Database tests require `TEST_DATABASE_URL`.
// Nix log line parsing // Nix log line parsing
@ -65,12 +65,9 @@ fn test_parse_nix_log_empty_line() {
#[tokio::test] #[tokio::test]
async fn test_worker_pool_drain_stops_dispatch() { async fn test_worker_pool_drain_stops_dispatch() {
// Create a minimal worker pool // Create a minimal worker pool
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping: TEST_DATABASE_URL not set"); println!("Skipping: TEST_DATABASE_URL not set");
return; return;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -83,7 +80,7 @@ async fn test_worker_pool_drain_stops_dispatch() {
pool, pool,
2, 2,
std::env::temp_dir(), std::env::temp_dir(),
std::time::Duration::from_secs(60), std::time::Duration::from_mins(1),
fc_common::config::LogConfig::default(), fc_common::config::LogConfig::default(),
fc_common::config::GcConfig::default(), fc_common::config::GcConfig::default(),
fc_common::config::NotificationsConfig::default(), fc_common::config::NotificationsConfig::default(),
@ -153,7 +150,7 @@ async fn test_cancellation_token_aborts_select() {
// Simulate a long-running build // Simulate a long-running build
let build_future = async { let build_future = async {
tokio::time::sleep(std::time::Duration::from_secs(60)).await; tokio::time::sleep(std::time::Duration::from_mins(1)).await;
"completed" "completed"
}; };
@ -176,12 +173,9 @@ async fn test_cancellation_token_aborts_select() {
#[tokio::test] #[tokio::test]
async fn test_worker_pool_active_builds_cancel() { async fn test_worker_pool_active_builds_cancel() {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping: TEST_DATABASE_URL not set"); println!("Skipping: TEST_DATABASE_URL not set");
return; return;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -194,7 +188,7 @@ async fn test_worker_pool_active_builds_cancel() {
pool, pool,
2, 2,
std::env::temp_dir(), std::env::temp_dir(),
std::time::Duration::from_secs(60), std::time::Duration::from_mins(1),
fc_common::config::LogConfig::default(), fc_common::config::LogConfig::default(),
fc_common::config::GcConfig::default(), fc_common::config::GcConfig::default(),
fc_common::config::NotificationsConfig::default(), fc_common::config::NotificationsConfig::default(),
@ -228,12 +222,9 @@ async fn test_worker_pool_active_builds_cancel() {
#[tokio::test] #[tokio::test]
async fn test_fair_share_scheduling() { async fn test_fair_share_scheduling() {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping: TEST_DATABASE_URL not set"); println!("Skipping: TEST_DATABASE_URL not set");
return; return;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -447,12 +438,9 @@ async fn test_fair_share_scheduling() {
#[tokio::test] #[tokio::test]
async fn test_atomic_build_claiming() { async fn test_atomic_build_claiming() {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping: TEST_DATABASE_URL not set"); println!("Skipping: TEST_DATABASE_URL not set");
return; return;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -541,12 +529,9 @@ async fn test_atomic_build_claiming() {
#[tokio::test] #[tokio::test]
async fn test_orphan_build_reset() { async fn test_orphan_build_reset() {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping: TEST_DATABASE_URL not set"); println!("Skipping: TEST_DATABASE_URL not set");
return; return;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -647,12 +632,9 @@ async fn test_orphan_build_reset() {
#[tokio::test] #[tokio::test]
async fn test_get_cancelled_among() { async fn test_get_cancelled_among() {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping: TEST_DATABASE_URL not set"); println!("Skipping: TEST_DATABASE_URL not set");
return; return;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()

View file

@ -15,6 +15,11 @@ use crate::state::AppState;
/// Write endpoints (POST/PUT/DELETE/PATCH) require a valid key. /// Write endpoints (POST/PUT/DELETE/PATCH) require a valid key.
/// Read endpoints (GET/HEAD/OPTIONS) try to extract optionally (for /// Read endpoints (GET/HEAD/OPTIONS) try to extract optionally (for
/// dashboard admin UI). /// dashboard admin UI).
///
/// # Errors
///
/// Returns unauthorized status if no valid authentication is found for write
/// operations.
pub async fn require_api_key( pub async fn require_api_key(
State(state): State<AppState>, State(state): State<AppState>,
mut request: Request, mut request: Request,
@ -164,6 +169,12 @@ impl FromRequestParts<AppState> for RequireAdmin {
pub struct RequireRoles; pub struct RequireRoles;
impl RequireRoles { impl RequireRoles {
/// Check if the session has one of the allowed roles. Admin always passes.
///
/// # Errors
///
/// Returns unauthorized or forbidden status if authentication fails or role
/// is insufficient.
pub fn check( pub fn check(
extensions: &axum::http::Extensions, extensions: &axum::http::Extensions,
allowed: &[&str], allowed: &[&str],
@ -212,8 +223,9 @@ pub async fn extract_session(
.and_then(|v| v.to_str().ok()) .and_then(|v| v.to_str().ok())
.map(String::from); .map(String::from);
if let Some(ref auth_header) = auth_header { if let Some(ref auth_header) = auth_header
if let Some(token) = auth_header.strip_prefix("Bearer ") { && let Some(token) = auth_header.strip_prefix("Bearer ")
{
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
hasher.update(token.as_bytes()); hasher.update(token.as_bytes());
@ -222,8 +234,18 @@ pub async fn extract_session(
if let Ok(Some(api_key)) = if let Ok(Some(api_key)) =
fc_common::repo::api_keys::get_by_hash(&state.pool, &key_hash).await fc_common::repo::api_keys::get_by_hash(&state.pool, &key_hash).await
{ {
request.extensions_mut().insert(api_key.clone()); // Update last used timestamp asynchronously
let pool = state.pool.clone();
let key_id = api_key.id;
tokio::spawn(async move {
if let Err(e) =
fc_common::repo::api_keys::touch_last_used(&pool, key_id).await
{
tracing::warn!(error = %e, "Failed to update API key last_used timestamp");
} }
});
request.extensions_mut().insert(api_key);
} }
} }
@ -273,9 +295,7 @@ pub async fn extract_session(
} }
fn parse_cookie(header: &str, name: &str) -> Option<String> { fn parse_cookie(header: &str, name: &str) -> Option<String> {
header header.split(';').find_map(|pair| {
.split(';')
.filter_map(|pair| {
let pair = pair.trim(); let pair = pair.trim();
let (k, v) = pair.split_once('=')?; let (k, v) = pair.split_once('=')?;
if k.trim() == name { if k.trim() == name {
@ -284,5 +304,4 @@ fn parse_cookie(header: &str, name: &str) -> Option<String> {
None None
} }
}) })
.next()
} }

View file

@ -96,7 +96,7 @@ async fn system_status(
.await .await
.map_err(|e| ApiError(fc_common::CiError::Database(e)))?; .map_err(|e| ApiError(fc_common::CiError::Database(e)))?;
let stats = fc_common::repo::builds::get_stats(pool) let build_stats = fc_common::repo::builds::get_stats(pool)
.await .await
.map_err(ApiError)?; .map_err(ApiError)?;
let builders = fc_common::repo::remote_builders::count(pool) let builders = fc_common::repo::remote_builders::count(pool)
@ -112,10 +112,10 @@ async fn system_status(
projects_count: projects.0, projects_count: projects.0,
jobsets_count: jobsets.0, jobsets_count: jobsets.0,
evaluations_count: evaluations.0, evaluations_count: evaluations.0,
builds_pending: stats.pending_builds.unwrap_or(0), builds_pending: build_stats.pending_builds.unwrap_or(0),
builds_running: stats.running_builds.unwrap_or(0), builds_running: build_stats.running_builds.unwrap_or(0),
builds_completed: stats.completed_builds.unwrap_or(0), builds_completed: build_stats.completed_builds.unwrap_or(0),
builds_failed: stats.failed_builds.unwrap_or(0), builds_failed: build_stats.failed_builds.unwrap_or(0),
remote_builders: builders, remote_builders: builders,
channels_count: channels.0, channels_count: channels.0,
})) }))

View file

@ -29,11 +29,8 @@ async fn build_badge(
.map_err(ApiError)?; .map_err(ApiError)?;
let jobset = jobsets.iter().find(|j| j.name == jobset_name); let jobset = jobsets.iter().find(|j| j.name == jobset_name);
let jobset = match jobset { let Some(jobset) = jobset else {
Some(j) => j,
None => {
return Ok(shield_svg("build", "not found", "#9f9f9f").into_response()); return Ok(shield_svg("build", "not found", "#9f9f9f").into_response());
},
}; };
// Get latest evaluation // Get latest evaluation
@ -41,13 +38,10 @@ async fn build_badge(
.await .await
.map_err(ApiError)?; .map_err(ApiError)?;
let eval = match eval { let Some(eval) = eval else {
Some(e) => e,
None => {
return Ok( return Ok(
shield_svg("build", "no evaluations", "#9f9f9f").into_response(), shield_svg("build", "no evaluations", "#9f9f9f").into_response(),
); );
},
}; };
// Find the build for this job // Find the build for this job
@ -58,8 +52,7 @@ async fn build_badge(
let build = builds.iter().find(|b| b.job_name == job_name); let build = builds.iter().find(|b| b.job_name == job_name);
let (label, color) = match build { let (label, color) = build.map_or(("not found", "#9f9f9f"), |b| {
Some(b) => {
match b.status { match b.status {
fc_common::BuildStatus::Succeeded => ("passing", "#4c1"), fc_common::BuildStatus::Succeeded => ("passing", "#4c1"),
fc_common::BuildStatus::Failed => ("failing", "#e05d44"), fc_common::BuildStatus::Failed => ("failing", "#e05d44"),
@ -68,21 +61,15 @@ async fn build_badge(
fc_common::BuildStatus::Cancelled => ("cancelled", "#9f9f9f"), fc_common::BuildStatus::Cancelled => ("cancelled", "#9f9f9f"),
fc_common::BuildStatus::DependencyFailed => ("dep failed", "#e05d44"), fc_common::BuildStatus::DependencyFailed => ("dep failed", "#e05d44"),
fc_common::BuildStatus::Aborted => ("aborted", "#9f9f9f"), fc_common::BuildStatus::Aborted => ("aborted", "#9f9f9f"),
fc_common::BuildStatus::FailedWithOutput => { fc_common::BuildStatus::FailedWithOutput => ("failed output", "#e05d44"),
("failed output", "#e05d44")
},
fc_common::BuildStatus::Timeout => ("timeout", "#e05d44"), fc_common::BuildStatus::Timeout => ("timeout", "#e05d44"),
fc_common::BuildStatus::CachedFailure => ("cached fail", "#e05d44"), fc_common::BuildStatus::CachedFailure => ("cached fail", "#e05d44"),
fc_common::BuildStatus::UnsupportedSystem => ("unsupported", "#9f9f9f"), fc_common::BuildStatus::UnsupportedSystem => ("unsupported", "#9f9f9f"),
fc_common::BuildStatus::LogLimitExceeded => ("log limit", "#e05d44"), fc_common::BuildStatus::LogLimitExceeded => ("log limit", "#e05d44"),
fc_common::BuildStatus::NarSizeLimitExceeded => { fc_common::BuildStatus::NarSizeLimitExceeded => ("nar limit", "#e05d44"),
("nar limit", "#e05d44")
},
fc_common::BuildStatus::NonDeterministic => ("non-det", "#e05d44"), fc_common::BuildStatus::NonDeterministic => ("non-det", "#e05d44"),
} }
}, });
None => ("not found", "#9f9f9f"),
};
Ok( Ok(
( (
@ -117,24 +104,16 @@ async fn latest_build(
.map_err(ApiError)?; .map_err(ApiError)?;
let jobset = jobsets.iter().find(|j| j.name == jobset_name); let jobset = jobsets.iter().find(|j| j.name == jobset_name);
let jobset = match jobset { let Some(jobset) = jobset else {
Some(j) => j,
None => {
return Ok((StatusCode::NOT_FOUND, "Jobset not found").into_response()); return Ok((StatusCode::NOT_FOUND, "Jobset not found").into_response());
},
}; };
let eval = fc_common::repo::evaluations::get_latest(&state.pool, jobset.id) let eval = fc_common::repo::evaluations::get_latest(&state.pool, jobset.id)
.await .await
.map_err(ApiError)?; .map_err(ApiError)?;
let eval = match eval { let Some(eval) = eval else {
Some(e) => e, return Ok((StatusCode::NOT_FOUND, "No evaluations found").into_response());
None => {
return Ok(
(StatusCode::NOT_FOUND, "No evaluations found").into_response(),
);
},
}; };
let builds = let builds =
@ -143,10 +122,10 @@ async fn latest_build(
.map_err(ApiError)?; .map_err(ApiError)?;
let build = builds.iter().find(|b| b.job_name == job_name); let build = builds.iter().find(|b| b.job_name == job_name);
match build { build.map_or_else(
Some(b) => Ok(axum::Json(b.clone()).into_response()), || Ok((StatusCode::NOT_FOUND, "Build not found").into_response()),
None => Ok((StatusCode::NOT_FOUND, "Build not found").into_response()), |b| Ok(axum::Json(b.clone()).into_response()),
} )
} }
fn shield_svg(subject: &str, status: &str, color: &str) -> String { fn shield_svg(subject: &str, status: &str, color: &str) -> String {

View file

@ -133,10 +133,10 @@ async fn list_build_products(
async fn build_stats( async fn build_stats(
State(state): State<AppState>, State(state): State<AppState>,
) -> Result<Json<fc_common::BuildStats>, ApiError> { ) -> Result<Json<fc_common::BuildStats>, ApiError> {
let stats = fc_common::repo::builds::get_stats(&state.pool) let build_stats = fc_common::repo::builds::get_stats(&state.pool)
.await .await
.map_err(ApiError)?; .map_err(ApiError)?;
Ok(Json(stats)) Ok(Json(build_stats))
} }
async fn recent_builds( async fn recent_builds(
@ -242,13 +242,10 @@ async fn download_build_product(
}, },
}; };
let stdout = match child.stdout.take() { let Some(stdout) = child.stdout.take() else {
Some(s) => s,
None => {
return Err(ApiError(fc_common::CiError::Build( return Err(ApiError(fc_common::CiError::Build(
"Failed to capture output".to_string(), "Failed to capture output".to_string(),
))); )));
},
}; };
let stream = tokio_util::io::ReaderStream::new(stdout); let stream = tokio_util::io::ReaderStream::new(stdout);

View file

@ -28,7 +28,7 @@ fn first_path_info_entry(
} }
} }
/// Look up a store path by its nix hash, checking both build_products and /// Look up a store path by its nix hash, checking both `build_products` and
/// builds tables. /// builds tables.
async fn find_store_path( async fn find_store_path(
pool: &sqlx::PgPool, pool: &sqlx::PgPool,
@ -64,6 +64,8 @@ async fn narinfo(
State(state): State<AppState>, State(state): State<AppState>,
Path(hash): Path<String>, Path(hash): Path<String>,
) -> Result<Response, ApiError> { ) -> Result<Response, ApiError> {
use std::fmt::Write;
if !state.config.cache.enabled { if !state.config.cache.enabled {
return Ok(StatusCode::NOT_FOUND.into_response()); return Ok(StatusCode::NOT_FOUND.into_response());
} }
@ -97,9 +99,8 @@ async fn narinfo(
Err(_) => return Ok(StatusCode::NOT_FOUND.into_response()), Err(_) => return Ok(StatusCode::NOT_FOUND.into_response()),
}; };
let (entry, path_from_info) = match first_path_info_entry(&parsed) { let Some((entry, path_from_info)) = first_path_info_entry(&parsed) else {
Some(e) => e, return Ok(StatusCode::NOT_FOUND.into_response());
None => return Ok(StatusCode::NOT_FOUND.into_response()),
}; };
let nar_hash = entry.get("narHash").and_then(|v| v.as_str()).unwrap_or(""); let nar_hash = entry.get("narHash").and_then(|v| v.as_str()).unwrap_or("");
@ -132,8 +133,6 @@ async fn narinfo(
let file_hash = nar_hash; let file_hash = nar_hash;
use std::fmt::Write;
let refs_joined = refs.join(" "); let refs_joined = refs.join(" ");
let mut narinfo_text = format!( let mut narinfo_text = format!(
"StorePath: {store_path}\nURL: nar/{hash}.nar.zst\nCompression: \ "StorePath: {store_path}\nURL: nar/{hash}.nar.zst\nCompression: \
@ -142,10 +141,10 @@ async fn narinfo(
); );
if let Some(deriver) = deriver { if let Some(deriver) = deriver {
let _ = write!(narinfo_text, "Deriver: {deriver}\n"); let _ = writeln!(narinfo_text, "Deriver: {deriver}");
} }
if let Some(ca) = ca { if let Some(ca) = ca {
let _ = write!(narinfo_text, "CA: {ca}\n"); let _ = writeln!(narinfo_text, "CA: {ca}");
} }
// Optionally sign if secret key is configured // Optionally sign if secret key is configured
@ -177,9 +176,8 @@ async fn sign_narinfo(narinfo: &str, key_file: &std::path::Path) -> String {
.find(|l| l.starts_with("StorePath: ")) .find(|l| l.starts_with("StorePath: "))
.and_then(|l| l.strip_prefix("StorePath: ")); .and_then(|l| l.strip_prefix("StorePath: "));
let store_path = match store_path { let Some(store_path) = store_path else {
Some(p) => p, return narinfo.to_string();
None => return narinfo.to_string(),
}; };
let output = Command::new("nix") let output = Command::new("nix")
@ -260,9 +258,8 @@ async fn serve_nar_zst(
)) ))
})?; })?;
let nix_stdout = match nix_child.stdout.take() { let Some(nix_stdout) = nix_child.stdout.take() else {
Some(s) => s, return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response());
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
}; };
let mut zstd_child = Command::new("zstd") let mut zstd_child = Command::new("zstd")
@ -278,9 +275,8 @@ async fn serve_nar_zst(
)) ))
})?; })?;
let zstd_stdout = match zstd_child.stdout.take() { let Some(zstd_stdout) = zstd_child.stdout.take() else {
Some(s) => s, return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response());
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
}; };
let stream = tokio_util::io::ReaderStream::new(zstd_stdout); let stream = tokio_util::io::ReaderStream::new(zstd_stdout);
@ -320,14 +316,12 @@ async fn serve_nar(
.kill_on_drop(true) .kill_on_drop(true)
.spawn(); .spawn();
let mut child = match child { let Ok(mut child) = child else {
Ok(c) => c, return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response());
Err(_) => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
}; };
let stdout = match child.stdout.take() { let Some(stdout) = child.stdout.take() else {
Some(s) => s, return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response());
None => return Ok(StatusCode::INTERNAL_SERVER_ERROR.into_response()),
}; };
let stream = tokio_util::io::ReaderStream::new(stdout); let stream = tokio_util::io::ReaderStream::new(stdout);
@ -343,7 +337,7 @@ async fn serve_nar(
) )
} }
/// Combined NAR handler — dispatches to zstd or plain based on suffix. /// Dispatches to zstd or plain based on suffix.
/// GET /nix-cache/nar/{hash} where hash includes .nar.zst or .nar suffix /// GET /nix-cache/nar/{hash} where hash includes .nar.zst or .nar suffix
async fn serve_nar_combined( async fn serve_nar_combined(
state: State<AppState>, state: State<AppState>,

View file

@ -63,9 +63,8 @@ async fn create_channel(
// Catch-up: if the jobset already has a completed evaluation, promote now // Catch-up: if the jobset already has a completed evaluation, promote now
if let Ok(Some(eval)) = if let Ok(Some(eval)) =
fc_common::repo::evaluations::get_latest(&state.pool, jobset_id).await fc_common::repo::evaluations::get_latest(&state.pool, jobset_id).await
{ && eval.status == fc_common::models::EvaluationStatus::Completed
if eval.status == fc_common::models::EvaluationStatus::Completed { && let Err(e) = fc_common::repo::channels::auto_promote_if_complete(
if let Err(e) = fc_common::repo::channels::auto_promote_if_complete(
&state.pool, &state.pool,
jobset_id, jobset_id,
eval.id, eval.id,
@ -74,8 +73,6 @@ async fn create_channel(
{ {
tracing::warn!(jobset_id = %jobset_id, "Failed to auto-promote channel: {e}"); tracing::warn!(jobset_id = %jobset_id, "Failed to auto-promote channel: {e}");
} }
}
}
// Re-fetch to include any promotion // Re-fetch to include any promotion
let channel = fc_common::repo::channels::get(&state.pool, channel.id) let channel = fc_common::repo::channels::get(&state.pool, channel.id)
@ -159,13 +156,12 @@ async fn nixexprs_tarball(
let _ = writeln!(nix_src, "in {{"); let _ = writeln!(nix_src, "in {{");
for build in &succeeded { for build in &succeeded {
let output_path = match &build.build_output_path { let Some(output_path) = &build.build_output_path else {
Some(p) => p, continue;
None => continue,
}; };
let system = build.system.as_deref().unwrap_or("x86_64-linux"); let system = build.system.as_deref().unwrap_or("x86_64-linux");
// Sanitize job_name for use as a Nix attribute (replace dots/slashes) // Sanitize job_name for use as a Nix attribute (replace dots/slashes)
let attr_name = build.job_name.replace('.', "-").replace('/', "-"); let attr_name = build.job_name.replace(['.', '/'], "-");
let _ = writeln!( let _ = writeln!(
nix_src, nix_src,
" \"{attr_name}\" = mkFakeDerivation {{ name = \"{}\"; system = \ " \"{attr_name}\" = mkFakeDerivation {{ name = \"{}\"; system = \

View file

@ -46,7 +46,7 @@ struct BuildView {
log_url: String, log_url: String,
} }
/// Enhanced build view for queue page with elapsed time and builder info /// Queue page build info with elapsed time and builder details
struct QueueBuildView { struct QueueBuildView {
id: Uuid, id: Uuid,
job_name: String, job_name: String,
@ -379,7 +379,7 @@ struct ChannelsTemplate {
channels: Vec<Channel>, channels: Vec<Channel>,
} }
/// Enhanced builder view with load and activity info /// Builder info with load and activity metrics
struct BuilderView { struct BuilderView {
id: Uuid, id: Uuid,
name: String, name: String,
@ -455,7 +455,7 @@ async fn home(
State(state): State<AppState>, State(state): State<AppState>,
extensions: Extensions, extensions: Extensions,
) -> Html<String> { ) -> Html<String> {
let stats = fc_common::repo::builds::get_stats(&state.pool) let build_stats = fc_common::repo::builds::get_stats(&state.pool)
.await .await
.unwrap_or_default(); .unwrap_or_default();
let builds = fc_common::repo::builds::list_recent(&state.pool, 10) let builds = fc_common::repo::builds::list_recent(&state.pool, 10)
@ -499,13 +499,13 @@ async fn home(
last_eval = Some(e); last_eval = Some(e);
} }
} }
let (status, class, time) = match &last_eval { let (status, class, time) = last_eval.as_ref().map_or_else(
Some(e) => { || ("-".into(), "pending".into(), "-".into()),
|e| {
let (t, c) = eval_badge(&e.status); let (t, c) = eval_badge(&e.status);
(t, c, e.evaluation_time.format("%Y-%m-%d %H:%M").to_string()) (t, c, e.evaluation_time.format("%Y-%m-%d %H:%M").to_string())
}, },
None => ("-".into(), "pending".into(), "-".into()), );
};
project_summaries.push(ProjectSummaryView { project_summaries.push(ProjectSummaryView {
id: p.id, id: p.id,
name: p.name.clone(), name: p.name.clone(),
@ -517,11 +517,11 @@ async fn home(
} }
let tmpl = HomeTemplate { let tmpl = HomeTemplate {
total_builds: stats.total_builds.unwrap_or(0), total_builds: build_stats.total_builds.unwrap_or(0),
completed_builds: stats.completed_builds.unwrap_or(0), completed_builds: build_stats.completed_builds.unwrap_or(0),
failed_builds: stats.failed_builds.unwrap_or(0), failed_builds: build_stats.failed_builds.unwrap_or(0),
running_builds: stats.running_builds.unwrap_or(0), running_builds: build_stats.running_builds.unwrap_or(0),
pending_builds: stats.pending_builds.unwrap_or(0), pending_builds: build_stats.pending_builds.unwrap_or(0),
recent_builds: builds.iter().map(build_view).collect(), recent_builds: builds.iter().map(build_view).collect(),
recent_evals: evals.iter().map(eval_view).collect(), recent_evals: evals.iter().map(eval_view).collect(),
projects: project_summaries, projects: project_summaries,
@ -581,9 +581,9 @@ async fn project_page(
Path(id): Path<Uuid>, Path(id): Path<Uuid>,
extensions: Extensions, extensions: Extensions,
) -> Html<String> { ) -> Html<String> {
let project = match fc_common::repo::projects::get(&state.pool, id).await { let Ok(project) = fc_common::repo::projects::get(&state.pool, id).await
Ok(p) => p, else {
Err(_) => return Html("Project not found".to_string()), return Html("Project not found".to_string());
}; };
let jobsets = let jobsets =
fc_common::repo::jobsets::list_for_project(&state.pool, id, 100, 0) fc_common::repo::jobsets::list_for_project(&state.pool, id, 100, 0)
@ -604,7 +604,7 @@ async fn project_page(
.unwrap_or_default(); .unwrap_or_default();
evals.append(&mut js_evals); evals.append(&mut js_evals);
} }
evals.sort_by(|a, b| b.evaluation_time.cmp(&a.evaluation_time)); evals.sort_by_key(|e| std::cmp::Reverse(e.evaluation_time));
evals.truncate(10); evals.truncate(10);
let tmpl = ProjectTemplate { let tmpl = ProjectTemplate {
@ -625,18 +625,13 @@ async fn jobset_page(
State(state): State<AppState>, State(state): State<AppState>,
Path(id): Path<Uuid>, Path(id): Path<Uuid>,
) -> Html<String> { ) -> Html<String> {
let jobset = match fc_common::repo::jobsets::get(&state.pool, id).await { let Ok(jobset) = fc_common::repo::jobsets::get(&state.pool, id).await else {
Ok(j) => j, return Html("Jobset not found".to_string());
Err(_) => return Html("Jobset not found".to_string()),
}; };
let project = match fc_common::repo::projects::get( let Ok(project) =
&state.pool, fc_common::repo::projects::get(&state.pool, jobset.project_id).await
jobset.project_id, else {
) return Html("Project not found".to_string());
.await
{
Ok(p) => p,
Err(_) => return Html("Project not found".to_string()),
}; };
let evals = fc_common::repo::evaluations::list_filtered( let evals = fc_common::repo::evaluations::list_filtered(
@ -769,24 +764,20 @@ async fn evaluation_page(
State(state): State<AppState>, State(state): State<AppState>,
Path(id): Path<Uuid>, Path(id): Path<Uuid>,
) -> Html<String> { ) -> Html<String> {
let eval = match fc_common::repo::evaluations::get(&state.pool, id).await { let Ok(eval) = fc_common::repo::evaluations::get(&state.pool, id).await
Ok(e) => e, else {
Err(_) => return Html("Evaluation not found".to_string()), return Html("Evaluation not found".to_string());
}; };
let jobset = let Ok(jobset) =
match fc_common::repo::jobsets::get(&state.pool, eval.jobset_id).await { fc_common::repo::jobsets::get(&state.pool, eval.jobset_id).await
Ok(j) => j, else {
Err(_) => return Html("Jobset not found".to_string()), return Html("Jobset not found".to_string());
}; };
let project = match fc_common::repo::projects::get( let Ok(project) =
&state.pool, fc_common::repo::projects::get(&state.pool, jobset.project_id).await
jobset.project_id, else {
) return Html("Project not found".to_string());
.await
{
Ok(p) => p,
Err(_) => return Html("Project not found".to_string()),
}; };
let builds = fc_common::repo::builds::list_filtered( let builds = fc_common::repo::builds::list_filtered(
@ -919,31 +910,24 @@ async fn build_page(
State(state): State<AppState>, State(state): State<AppState>,
Path(id): Path<Uuid>, Path(id): Path<Uuid>,
) -> Html<String> { ) -> Html<String> {
let build = match fc_common::repo::builds::get(&state.pool, id).await { let Ok(build) = fc_common::repo::builds::get(&state.pool, id).await else {
Ok(b) => b, return Html("Build not found".to_string());
Err(_) => return Html("Build not found".to_string()),
}; };
let eval = let Ok(eval) =
match fc_common::repo::evaluations::get(&state.pool, build.evaluation_id) fc_common::repo::evaluations::get(&state.pool, build.evaluation_id).await
.await else {
{ return Html("Evaluation not found".to_string());
Ok(e) => e,
Err(_) => return Html("Evaluation not found".to_string()),
}; };
let jobset = let Ok(jobset) =
match fc_common::repo::jobsets::get(&state.pool, eval.jobset_id).await { fc_common::repo::jobsets::get(&state.pool, eval.jobset_id).await
Ok(j) => j, else {
Err(_) => return Html("Jobset not found".to_string()), return Html("Jobset not found".to_string());
}; };
let project = match fc_common::repo::projects::get( let Ok(project) =
&state.pool, fc_common::repo::projects::get(&state.pool, jobset.project_id).await
jobset.project_id, else {
) return Html("Project not found".to_string());
.await
{
Ok(p) => p,
Err(_) => return Html("Project not found".to_string()),
}; };
let eval_commit_short = if eval.commit_hash.len() > 12 { let eval_commit_short = if eval.commit_hash.len() > 12 {
@ -1016,12 +1000,10 @@ async fn queue_page(State(state): State<AppState>) -> Html<String> {
let running_builds: Vec<QueueBuildView> = running let running_builds: Vec<QueueBuildView> = running
.iter() .iter()
.map(|b| { .map(|b| {
let elapsed = if let Some(started) = b.started_at { let elapsed = b.started_at.map_or_else(String::new, |started| {
let dur = chrono::Utc::now() - started; let dur = chrono::Utc::now() - started;
format_elapsed(dur.num_seconds()) format_elapsed(dur.num_seconds())
} else { });
String::new()
};
let builder_name = let builder_name =
b.builder_id.and_then(|id| builder_map.get(&id).cloned()); b.builder_id.and_then(|id| builder_map.get(&id).cloned());
QueueBuildView { QueueBuildView {
@ -1114,7 +1096,7 @@ async fn admin_page(
.fetch_one(pool) .fetch_one(pool)
.await .await
.unwrap_or((0,)); .unwrap_or((0,));
let stats = fc_common::repo::builds::get_stats(pool) let build_stats = fc_common::repo::builds::get_stats(pool)
.await .await
.unwrap_or_default(); .unwrap_or_default();
let builders_count = fc_common::repo::remote_builders::count(pool) let builders_count = fc_common::repo::remote_builders::count(pool)
@ -1129,10 +1111,10 @@ async fn admin_page(
projects_count: projects.0, projects_count: projects.0,
jobsets_count: jobsets.0, jobsets_count: jobsets.0,
evaluations_count: evaluations.0, evaluations_count: evaluations.0,
builds_pending: stats.pending_builds.unwrap_or(0), builds_pending: build_stats.pending_builds.unwrap_or(0),
builds_running: stats.running_builds.unwrap_or(0), builds_running: build_stats.running_builds.unwrap_or(0),
builds_completed: stats.completed_builds.unwrap_or(0), builds_completed: build_stats.completed_builds.unwrap_or(0),
builds_failed: stats.failed_builds.unwrap_or(0), builds_failed: build_stats.failed_builds.unwrap_or(0),
remote_builders: builders_count, remote_builders: builders_count,
channels_count: channels.0, channels_count: channels.0,
}; };
@ -1277,17 +1259,10 @@ async fn login_action(
created_at: std::time::Instant::now(), created_at: std::time::Instant::now(),
}); });
let secure_flag = if !state.config.server.cors_permissive let security_flags =
&& state.config.server.host != "127.0.0.1" crate::routes::cookie_security_flags(&state.config.server);
&& state.config.server.host != "localhost"
{
"; Secure"
} else {
""
};
let cookie = format!( let cookie = format!(
"fc_user_session={session_id}; HttpOnly; SameSite=Strict; Path=/; \ "fc_user_session={session_id}; {security_flags}; Path=/; Max-Age=86400"
Max-Age=86400{secure_flag}"
); );
return ( return (
[(axum::http::header::SET_COOKIE, cookie)], [(axum::http::header::SET_COOKIE, cookie)],
@ -1341,17 +1316,10 @@ async fn login_action(
created_at: std::time::Instant::now(), created_at: std::time::Instant::now(),
}); });
let secure_flag = if !state.config.server.cors_permissive let security_flags =
&& state.config.server.host != "127.0.0.1" crate::routes::cookie_security_flags(&state.config.server);
&& state.config.server.host != "localhost"
{
"; Secure"
} else {
""
};
let cookie = format!( let cookie = format!(
"fc_session={session_id}; HttpOnly; SameSite=Strict; Path=/; \ "fc_session={session_id}; {security_flags}; Path=/; Max-Age=86400"
Max-Age=86400{secure_flag}"
); );
( (
[(axum::http::header::SET_COOKIE, cookie)], [(axum::http::header::SET_COOKIE, cookie)],
@ -1395,9 +1363,7 @@ async fn logout_action(
.and_then(|v| v.to_str().ok()) .and_then(|v| v.to_str().ok())
{ {
// Check for user session // Check for user session
if let Some(session_id) = cookie_header if let Some(session_id) = cookie_header.split(';').find_map(|pair| {
.split(';')
.filter_map(|pair| {
let pair = pair.trim(); let pair = pair.trim();
let (k, v) = pair.split_once('=')?; let (k, v) = pair.split_once('=')?;
if k.trim() == "fc_user_session" { if k.trim() == "fc_user_session" {
@ -1405,16 +1371,12 @@ async fn logout_action(
} else { } else {
None None
} }
}) }) {
.next()
{
state.sessions.remove(&session_id); state.sessions.remove(&session_id);
} }
// Check for legacy API key session // Check for legacy API key session
if let Some(session_id) = cookie_header if let Some(session_id) = cookie_header.split(';').find_map(|pair| {
.split(';')
.filter_map(|pair| {
let pair = pair.trim(); let pair = pair.trim();
let (k, v) = pair.split_once('=')?; let (k, v) = pair.split_once('=')?;
if k.trim() == "fc_session" { if k.trim() == "fc_session" {
@ -1422,9 +1384,7 @@ async fn logout_action(
} else { } else {
None None
} }
}) }) {
.next()
{
state.sessions.remove(&session_id); state.sessions.remove(&session_id);
} }
} }
@ -1570,12 +1530,13 @@ async fn starred_page(
Vec::new() Vec::new()
}; };
if let Some(build) = builds.first() { builds.first().map_or_else(
|| ("No builds".to_string(), "pending".to_string(), None),
|build| {
let (text, class) = status_badge(&build.status); let (text, class) = status_badge(&build.status);
(text, class, Some(build.id)) (text, class, Some(build.id))
} else { },
("No builds".to_string(), "pending".to_string(), None) )
}
} else { } else {
("No builds".to_string(), "pending".to_string(), None) ("No builds".to_string(), "pending".to_string(), None)
}; };

View file

@ -93,7 +93,7 @@ async fn stream_build_log(
if active_path.exists() { active_path.clone() } else { final_path.clone() } if active_path.exists() { active_path.clone() } else { final_path.clone() }
}; };
let file = if let Ok(f) = tokio::fs::File::open(&path).await { f } else { let Ok(file) = tokio::fs::File::open(&path).await else {
yield Ok(Event::default().data("Failed to open log file")); yield Ok(Event::default().data("Failed to open log file"));
return; return;
}; };
@ -106,7 +106,7 @@ async fn stream_build_log(
line.clear(); line.clear();
match reader.read_line(&mut line).await { match reader.read_line(&mut line).await {
Ok(0) => { Ok(0) => {
// EOF check if build is still running // EOF - check if build is still running
consecutive_empty += 1; consecutive_empty += 1;
if consecutive_empty > 5 { if consecutive_empty > 5 {
// Check build status // Check build status

View file

@ -21,11 +21,11 @@ struct TimeseriesQuery {
bucket: i32, bucket: i32,
} }
fn default_hours() -> i32 { const fn default_hours() -> i32 {
24 24
} }
fn default_bucket() -> i32 { const fn default_bucket() -> i32 {
60 60
} }
@ -64,21 +64,19 @@ fn escape_prometheus_label(s: &str) -> String {
} }
async fn prometheus_metrics(State(state): State<AppState>) -> Response { async fn prometheus_metrics(State(state): State<AppState>) -> Response {
let stats = match fc_common::repo::builds::get_stats(&state.pool).await { use std::fmt::Write;
Ok(s) => s,
Err(_) => { let Ok(build_stats) = fc_common::repo::builds::get_stats(&state.pool).await
else {
return StatusCode::INTERNAL_SERVER_ERROR.into_response(); return StatusCode::INTERNAL_SERVER_ERROR.into_response();
},
}; };
let eval_count: i64 = let eval_count: i64 =
match sqlx::query_as::<_, (i64,)>("SELECT COUNT(*) FROM evaluations") sqlx::query_as::<_, (i64,)>("SELECT COUNT(*) FROM evaluations")
.fetch_one(&state.pool) .fetch_one(&state.pool)
.await .await
{ .ok()
Ok(row) => row.0, .map_or(0, |row| row.0);
Err(_) => 0,
};
let eval_by_status: Vec<(String, i64)> = sqlx::query_as( let eval_by_status: Vec<(String, i64)> = sqlx::query_as(
"SELECT status::text, COUNT(*) FROM evaluations GROUP BY status", "SELECT status::text, COUNT(*) FROM evaluations GROUP BY status",
@ -124,8 +122,6 @@ async fn prometheus_metrics(State(state): State<AppState>) -> Response {
.await .await
.unwrap_or((None, None, None)); .unwrap_or((None, None, None));
use std::fmt::Write;
let mut output = String::with_capacity(2048); let mut output = String::with_capacity(2048);
// Build counts by status // Build counts by status
@ -134,27 +130,27 @@ async fn prometheus_metrics(State(state): State<AppState>) -> Response {
let _ = writeln!( let _ = writeln!(
output, output,
"fc_builds_total{{status=\"succeeded\"}} {}", "fc_builds_total{{status=\"succeeded\"}} {}",
stats.completed_builds.unwrap_or(0) build_stats.completed_builds.unwrap_or(0)
); );
let _ = writeln!( let _ = writeln!(
output, output,
"fc_builds_total{{status=\"failed\"}} {}", "fc_builds_total{{status=\"failed\"}} {}",
stats.failed_builds.unwrap_or(0) build_stats.failed_builds.unwrap_or(0)
); );
let _ = writeln!( let _ = writeln!(
output, output,
"fc_builds_total{{status=\"running\"}} {}", "fc_builds_total{{status=\"running\"}} {}",
stats.running_builds.unwrap_or(0) build_stats.running_builds.unwrap_or(0)
); );
let _ = writeln!( let _ = writeln!(
output, output,
"fc_builds_total{{status=\"pending\"}} {}", "fc_builds_total{{status=\"pending\"}} {}",
stats.pending_builds.unwrap_or(0) build_stats.pending_builds.unwrap_or(0)
); );
let _ = writeln!( let _ = writeln!(
output, output,
"fc_builds_total{{status=\"all\"}} {}", "fc_builds_total{{status=\"all\"}} {}",
stats.total_builds.unwrap_or(0) build_stats.total_builds.unwrap_or(0)
); );
// Build duration stats // Build duration stats
@ -166,7 +162,7 @@ async fn prometheus_metrics(State(state): State<AppState>) -> Response {
let _ = writeln!( let _ = writeln!(
output, output,
"fc_builds_avg_duration_seconds {:.2}", "fc_builds_avg_duration_seconds {:.2}",
stats.avg_duration_seconds.unwrap_or(0.0) build_stats.avg_duration_seconds.unwrap_or(0.0)
); );
output.push_str( output.push_str(
@ -214,7 +210,7 @@ async fn prometheus_metrics(State(state): State<AppState>) -> Response {
let _ = writeln!( let _ = writeln!(
output, output,
"fc_queue_depth {}", "fc_queue_depth {}",
stats.pending_builds.unwrap_or(0) build_stats.pending_builds.unwrap_or(0)
); );
// Infrastructure // Infrastructure

View file

@ -43,8 +43,37 @@ use crate::{
static STYLE_CSS: &str = include_str!("../../static/style.css"); static STYLE_CSS: &str = include_str!("../../static/style.css");
/// Helper to generate secure cookie flags based on server configuration.
/// Returns a string containing cookie security attributes: `HttpOnly`,
/// `SameSite`, and optionally Secure.
///
/// The Secure flag is set when:
///
/// 1. `force_secure_cookies` is enabled in config (for HTTPS reverse proxies),
/// 2. OR the server is not bound to localhost/127.0.0.1 AND not in permissive
/// mode
#[must_use]
pub fn cookie_security_flags(
config: &fc_common::config::ServerConfig,
) -> String {
let is_localhost = config.host == "127.0.0.1"
|| config.host == "localhost"
|| config.host == "::1";
let secure_flag = if config.force_secure_cookies
|| (!is_localhost && !config.cors_permissive)
{
"; Secure"
} else {
""
};
format!("HttpOnly; SameSite=Strict{secure_flag}")
}
struct RateLimitState { struct RateLimitState {
requests: DashMap<IpAddr, Vec<Instant>>, requests: DashMap<IpAddr, Vec<Instant>>,
rps: u64,
burst: u32, burst: u32,
last_cleanup: std::sync::atomic::AtomicU64, last_cleanup: std::sync::atomic::AtomicU64,
} }
@ -89,10 +118,23 @@ async fn rate_limit_middleware(
let mut entry = rl.requests.entry(ip).or_default(); let mut entry = rl.requests.entry(ip).or_default();
entry.retain(|t| now.duration_since(*t) < window); entry.retain(|t| now.duration_since(*t) < window);
if entry.len() >= rl.burst as usize { // Token bucket algorithm: allow burst, then enforce rps limit
let request_count = entry.len();
if request_count >= rl.burst as usize {
return StatusCode::TOO_MANY_REQUESTS.into_response(); return StatusCode::TOO_MANY_REQUESTS.into_response();
} }
// If within burst but need to check rate, ensure we don't exceed rps
if request_count >= rl.rps as usize {
// Check if oldest request in window is still within the rps constraint
if let Some(oldest) = entry.first() {
let elapsed = now.duration_since(*oldest);
if elapsed < window {
return StatusCode::TOO_MANY_REQUESTS.into_response();
}
}
}
entry.push(now); entry.push(now);
drop(entry); drop(entry);
} }
@ -176,11 +218,12 @@ pub fn router(state: AppState, config: &ServerConfig) -> Router {
)); ));
// Add rate limiting if configured // Add rate limiting if configured
if let (Some(_rps), Some(burst)) = if let (Some(rps), Some(burst)) =
(config.rate_limit_rps, config.rate_limit_burst) (config.rate_limit_rps, config.rate_limit_burst)
{ {
let rl_state = Arc::new(RateLimitState { let rl_state = Arc::new(RateLimitState {
requests: DashMap::new(), requests: DashMap::new(),
rps,
burst, burst,
last_cleanup: std::sync::atomic::AtomicU64::new(0), last_cleanup: std::sync::atomic::AtomicU64::new(0),
}); });

View file

@ -89,12 +89,9 @@ fn build_github_client(config: &GitHubOAuthConfig) -> GitHubOAuthClient {
} }
async fn github_login(State(state): State<AppState>) -> impl IntoResponse { async fn github_login(State(state): State<AppState>) -> impl IntoResponse {
let config = match &state.config.oauth.github { let Some(config) = &state.config.oauth.github else {
Some(c) => c,
None => {
return (StatusCode::NOT_FOUND, "GitHub OAuth not configured") return (StatusCode::NOT_FOUND, "GitHub OAuth not configured")
.into_response(); .into_response();
},
}; };
let client = build_github_client(config); let client = build_github_client(config);
@ -105,16 +102,26 @@ async fn github_login(State(state): State<AppState>) -> impl IntoResponse {
.url(); .url();
// Store CSRF token in a cookie for verification // Store CSRF token in a cookie for verification
// Add Secure flag when using HTTPS (detected via redirect_uri) // Use SameSite=Lax for OAuth flow (must work across redirect)
let secure_flag = if config.redirect_uri.starts_with("https://") { let security_flags = {
let is_localhost = config.redirect_uri.starts_with("http://localhost")
|| config.redirect_uri.starts_with("http://127.0.0.1");
let secure_flag = if state.config.server.force_secure_cookies
|| (!is_localhost && config.redirect_uri.starts_with("https://"))
{
"; Secure" "; Secure"
} else { } else {
"" ""
}; };
format!("HttpOnly; SameSite=Lax{secure_flag}")
};
let cookie = format!( let cookie = format!(
"fc_oauth_state={}; HttpOnly; SameSite=Lax; Path=/; Max-Age=600{}", "fc_oauth_state={}; {}; Path=/; Max-Age=600",
csrf_token.secret(), csrf_token.secret(),
secure_flag security_flags
); );
Response::builder() Response::builder()
@ -131,13 +138,10 @@ async fn github_callback(
headers: axum::http::HeaderMap, headers: axum::http::HeaderMap,
Query(params): Query<OAuthCallbackParams>, Query(params): Query<OAuthCallbackParams>,
) -> Result<impl IntoResponse, ApiError> { ) -> Result<impl IntoResponse, ApiError> {
let config = match &state.config.oauth.github { let Some(config) = &state.config.oauth.github else {
Some(c) => c,
None => {
return Err(ApiError(fc_common::CiError::NotFound( return Err(ApiError(fc_common::CiError::NotFound(
"GitHub OAuth not configured".to_string(), "GitHub OAuth not configured".to_string(),
))); )));
},
}; };
// Verify CSRF token from cookie // Verify CSRF token from cookie
@ -263,20 +267,29 @@ async fn github_callback(
.map_err(ApiError)?; .map_err(ApiError)?;
// Clear OAuth state cookie and set session cookie // Clear OAuth state cookie and set session cookie
// Add Secure flag when using HTTPS (detected via redirect_uri) // Use SameSite=Lax for OAuth callback (must work across redirect)
let secure_flag = if config.redirect_uri.starts_with("https://") { let security_flags = {
let is_localhost = config.redirect_uri.starts_with("http://localhost")
|| config.redirect_uri.starts_with("http://127.0.0.1");
let secure_flag = if state.config.server.force_secure_cookies
|| (!is_localhost && config.redirect_uri.starts_with("https://"))
{
"; Secure" "; Secure"
} else { } else {
"" ""
}; };
let clear_state = format!(
"fc_oauth_state=; HttpOnly; SameSite=Lax; Path=/; Max-Age=0{secure_flag}" format!("HttpOnly; SameSite=Lax{secure_flag}")
); };
let clear_state =
format!("fc_oauth_state=; {security_flags}; Path=/; Max-Age=0");
let session_cookie = format!( let session_cookie = format!(
"fc_user_session={}; HttpOnly; SameSite=Lax; Path=/; Max-Age={}{}", "fc_user_session={}; {}; Path=/; Max-Age={}",
session.0, session.0,
7 * 24 * 60 * 60, // 7 days security_flags,
secure_flag 7 * 24 * 60 * 60 // 7 days
); );
Ok( Ok(
@ -352,21 +365,21 @@ mod tests {
fn test_secure_flag_detection() { fn test_secure_flag_detection() {
// HTTP should not have Secure flag // HTTP should not have Secure flag
let http_uri = "http://localhost:3000/callback"; let http_uri = "http://localhost:3000/callback";
let secure_flag = if http_uri.starts_with("https://") { let http_secure_flag = if http_uri.starts_with("https://") {
"; Secure" "; Secure"
} else { } else {
"" ""
}; };
assert_eq!(secure_flag, ""); assert_eq!(http_secure_flag, "");
// HTTPS should have Secure flag // HTTPS should have Secure flag
let https_uri = "https://example.com/callback"; let https_uri = "https://example.com/callback";
let secure_flag = if https_uri.starts_with("https://") { let https_secure_flag = if https_uri.starts_with("https://") {
"; Secure" "; Secure"
} else { } else {
"" ""
}; };
assert_eq!(secure_flag, "; Secure"); assert_eq!(https_secure_flag, "; Secure");
} }
#[test] #[test]
@ -418,7 +431,7 @@ mod tests {
#[test] #[test]
fn test_github_emails_find_primary_verified() { fn test_github_emails_find_primary_verified() {
let emails = vec![ let emails = [
GitHubEmailResponse { GitHubEmailResponse {
email: "secondary@example.com".to_string(), email: "secondary@example.com".to_string(),
primary: false, primary: false,
@ -448,7 +461,7 @@ mod tests {
#[test] #[test]
fn test_github_emails_fallback_to_verified() { fn test_github_emails_fallback_to_verified() {
// No primary email, should fall back to first verified // No primary email, should fall back to first verified
let emails = vec![ let emails = [
GitHubEmailResponse { GitHubEmailResponse {
email: "unverified@example.com".to_string(), email: "unverified@example.com".to_string(),
primary: false, primary: false,
@ -473,7 +486,7 @@ mod tests {
#[test] #[test]
fn test_github_emails_no_verified() { fn test_github_emails_no_verified() {
// No verified emails // No verified emails
let emails = vec![GitHubEmailResponse { let emails = [GitHubEmailResponse {
email: "unverified@example.com".to_string(), email: "unverified@example.com".to_string(),
primary: true, primary: true,
verified: false, verified: false,
@ -521,8 +534,8 @@ mod tests {
let max_age = 7 * 24 * 60 * 60; let max_age = 7 * 24 * 60 * 60;
let cookie = format!( let cookie = format!(
"fc_user_session={}; HttpOnly; SameSite=Lax; Path=/; Max-Age={}{}", "fc_user_session={session_token}; HttpOnly; SameSite=Lax; Path=/; \
session_token, max_age, secure_flag Max-Age={max_age}{secure_flag}"
); );
assert!(cookie.contains("fc_user_session=test-session-token")); assert!(cookie.contains("fc_user_session=test-session-token"));

View file

@ -159,9 +159,7 @@ async fn handle_github_webhook(
.await .await
.map_err(ApiError)?; .map_err(ApiError)?;
let webhook_config = match webhook_config { let Some(webhook_config) = webhook_config else {
Some(c) => c,
None => {
return Ok(( return Ok((
StatusCode::NOT_FOUND, StatusCode::NOT_FOUND,
Json(WebhookResponse { Json(WebhookResponse {
@ -169,7 +167,6 @@ async fn handle_github_webhook(
message: "No GitHub webhook configured for this project".to_string(), message: "No GitHub webhook configured for this project".to_string(),
}), }),
)); ));
},
}; };
// Verify signature if secret is configured // Verify signature if secret is configured
@ -299,9 +296,7 @@ async fn handle_github_pull_request(
)); ));
} }
let pr = match payload.pull_request { let Some(pr) = payload.pull_request else {
Some(pr) => pr,
None => {
return Ok(( return Ok((
StatusCode::OK, StatusCode::OK,
Json(WebhookResponse { Json(WebhookResponse {
@ -309,7 +304,6 @@ async fn handle_github_pull_request(
message: "No pull request data, skipping".to_string(), message: "No pull request data, skipping".to_string(),
}), }),
)); ));
},
}; };
// Skip draft PRs // Skip draft PRs
@ -513,6 +507,8 @@ async fn handle_gitlab_webhook(
headers: HeaderMap, headers: HeaderMap,
body: Bytes, body: Bytes,
) -> Result<(StatusCode, Json<WebhookResponse>), ApiError> { ) -> Result<(StatusCode, Json<WebhookResponse>), ApiError> {
use subtle::ConstantTimeEq;
// Check webhook config exists // Check webhook config exists
let webhook_config = repo::webhook_configs::get_by_project_and_forge( let webhook_config = repo::webhook_configs::get_by_project_and_forge(
&state.pool, &state.pool,
@ -522,9 +518,7 @@ async fn handle_gitlab_webhook(
.await .await
.map_err(ApiError)?; .map_err(ApiError)?;
let webhook_config = match webhook_config { let Some(webhook_config) = webhook_config else {
Some(c) => c,
None => {
return Ok(( return Ok((
StatusCode::NOT_FOUND, StatusCode::NOT_FOUND,
Json(WebhookResponse { Json(WebhookResponse {
@ -532,7 +526,6 @@ async fn handle_gitlab_webhook(
message: "No GitLab webhook configured for this project".to_string(), message: "No GitLab webhook configured for this project".to_string(),
}), }),
)); ));
},
}; };
// Verify token if secret is configured // Verify token if secret is configured
@ -544,7 +537,6 @@ async fn handle_gitlab_webhook(
.unwrap_or(""); .unwrap_or("");
// Use constant-time comparison to prevent timing attacks // Use constant-time comparison to prevent timing attacks
use subtle::ConstantTimeEq;
let token_matches = token.len() == secret.len() let token_matches = token.len() == secret.len()
&& token.as_bytes().ct_eq(secret.as_bytes()).into(); && token.as_bytes().ct_eq(secret.as_bytes()).into();
@ -656,9 +648,7 @@ async fn handle_gitlab_merge_request(
))) )))
})?; })?;
let attrs = match payload.object_attributes { let Some(attrs) = payload.object_attributes else {
Some(a) => a,
None => {
return Ok(( return Ok((
StatusCode::OK, StatusCode::OK,
Json(WebhookResponse { Json(WebhookResponse {
@ -666,7 +656,6 @@ async fn handle_gitlab_merge_request(
message: "No merge request attributes, skipping".to_string(), message: "No merge request attributes, skipping".to_string(),
}), }),
)); ));
},
}; };
// Skip draft/WIP merge requests // Skip draft/WIP merge requests
@ -774,12 +763,13 @@ mod tests {
#[test] #[test]
fn test_verify_signature_valid() { fn test_verify_signature_valid() {
use hmac::{Hmac, Mac};
use sha2::Sha256;
let secret = "test-secret"; let secret = "test-secret";
let body = b"test-body"; let body = b"test-body";
// Compute expected signature // Compute expected signature
use hmac::{Hmac, Mac};
use sha2::Sha256;
let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes()).unwrap(); let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes()).unwrap();
mac.update(body); mac.update(body);
let expected = hex::encode(mac.finalize().into_bytes()); let expected = hex::encode(mac.finalize().into_bytes());
@ -787,7 +777,7 @@ mod tests {
assert!(verify_signature( assert!(verify_signature(
secret, secret,
body, body,
&format!("sha256={}", expected) &format!("sha256={expected}")
)); ));
} }
@ -800,20 +790,16 @@ mod tests {
#[test] #[test]
fn test_verify_signature_wrong_secret() { fn test_verify_signature_wrong_secret() {
let body = b"test-body";
use hmac::{Hmac, Mac}; use hmac::{Hmac, Mac};
use sha2::Sha256; use sha2::Sha256;
let body = b"test-body";
let mut mac = Hmac::<Sha256>::new_from_slice(b"secret1").unwrap(); let mut mac = Hmac::<Sha256>::new_from_slice(b"secret1").unwrap();
mac.update(body); mac.update(body);
let sig = hex::encode(mac.finalize().into_bytes()); let sig = hex::encode(mac.finalize().into_bytes());
// Verify with different secret should fail // Verify with different secret should fail
assert!(!verify_signature( assert!(!verify_signature("secret2", body, &format!("sha256={sig}")));
"secret2",
body,
&format!("sha256={}", sig)
));
} }
#[test] #[test]

View file

@ -9,11 +9,11 @@ use sqlx::PgPool;
/// Maximum session lifetime before automatic eviction (24 hours). /// Maximum session lifetime before automatic eviction (24 hours).
const SESSION_MAX_AGE: std::time::Duration = const SESSION_MAX_AGE: std::time::Duration =
std::time::Duration::from_secs(24 * 60 * 60); std::time::Duration::from_hours(24);
/// How often the background cleanup task runs (every 5 minutes). /// How often the background cleanup task runs (every 5 minutes).
const SESSION_CLEANUP_INTERVAL: std::time::Duration = const SESSION_CLEANUP_INTERVAL: std::time::Duration =
std::time::Duration::from_secs(5 * 60); std::time::Duration::from_mins(5);
/// Session data supporting both API key and user authentication /// Session data supporting both API key and user authentication
#[derive(Clone)] #[derive(Clone)]
@ -27,13 +27,10 @@ impl SessionData {
/// Check if the session has admin role /// Check if the session has admin role
#[must_use] #[must_use]
pub fn is_admin(&self) -> bool { pub fn is_admin(&self) -> bool {
if let Some(ref user) = self.user { self.user.as_ref().map_or_else(
user.role == "admin" || self.api_key.as_ref().is_some_and(|key| key.role == "admin"),
} else if let Some(ref key) = self.api_key { |user| user.role == "admin",
key.role == "admin" )
} else {
false
}
} }
/// Check if the session has a specific role /// Check if the session has a specific role
@ -42,25 +39,24 @@ impl SessionData {
if self.is_admin() { if self.is_admin() {
return true; return true;
} }
if let Some(ref user) = self.user { self.user.as_ref().map_or_else(
user.role == role || self.api_key.as_ref().is_some_and(|key| key.role == role),
} else if let Some(ref key) = self.api_key { |user| user.role == role,
key.role == role )
} else {
false
}
} }
/// Get the display name for the session (username or api key name) /// Get the display name for the session (username or api key name)
#[must_use] #[must_use]
pub fn display_name(&self) -> String { pub fn display_name(&self) -> String {
if let Some(ref user) = self.user { self.user.as_ref().map_or_else(
user.username.clone() || {
} else if let Some(ref key) = self.api_key { self
key.name.clone() .api_key
} else { .as_ref()
"Anonymous".to_string() .map_or_else(|| "Anonymous".to_string(), |key| key.name.clone())
} },
|user| user.username.clone(),
)
} }
/// Check if this is a user session (not just API key) /// Check if this is a user session (not just API key)

View file

@ -1,5 +1,5 @@
//! Integration tests for API endpoints. //! Integration tests for API endpoints.
//! Requires TEST_DATABASE_URL to be set. //! Requires `TEST_DATABASE_URL` to be set.
use axum::{ use axum::{
body::Body, body::Body,
@ -8,12 +8,9 @@ use axum::{
use tower::ServiceExt; use tower::ServiceExt;
async fn get_pool() -> Option<sqlx::PgPool> { async fn get_pool() -> Option<sqlx::PgPool> {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping API test: TEST_DATABASE_URL not set"); println!("Skipping API test: TEST_DATABASE_URL not set");
return None; return None;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -44,9 +41,8 @@ fn build_app(pool: sqlx::PgPool) -> axum::Router {
#[tokio::test] #[tokio::test]
async fn test_router_no_duplicate_routes() { async fn test_router_no_duplicate_routes() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let config = fc_common::config::Config::default(); let config = fc_common::config::Config::default();
@ -79,9 +75,8 @@ fn build_app_with_config(
#[tokio::test] #[tokio::test]
async fn test_health_endpoint() { async fn test_health_endpoint() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -108,9 +103,8 @@ async fn test_health_endpoint() {
#[tokio::test] #[tokio::test]
async fn test_project_endpoints() { async fn test_project_endpoints() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -204,9 +198,8 @@ async fn test_project_endpoints() {
#[tokio::test] #[tokio::test]
async fn test_builds_endpoints() { async fn test_builds_endpoints() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -244,9 +237,8 @@ async fn test_builds_endpoints() {
#[tokio::test] #[tokio::test]
async fn test_error_response_includes_error_code() { async fn test_error_response_includes_error_code() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -275,9 +267,8 @@ async fn test_error_response_includes_error_code() {
#[tokio::test] #[tokio::test]
async fn test_cache_invalid_hash_returns_404() { async fn test_cache_invalid_hash_returns_404() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let mut config = fc_common::config::Config::default(); let mut config = fc_common::config::Config::default();
@ -352,9 +343,8 @@ async fn test_cache_invalid_hash_returns_404() {
#[tokio::test] #[tokio::test]
async fn test_cache_nar_invalid_hash_returns_404() { async fn test_cache_nar_invalid_hash_returns_404() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let mut config = fc_common::config::Config::default(); let mut config = fc_common::config::Config::default();
@ -390,9 +380,8 @@ async fn test_cache_nar_invalid_hash_returns_404() {
#[tokio::test] #[tokio::test]
async fn test_cache_disabled_returns_404() { async fn test_cache_disabled_returns_404() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let mut config = fc_common::config::Config::default(); let mut config = fc_common::config::Config::default();
@ -426,9 +415,8 @@ async fn test_cache_disabled_returns_404() {
#[tokio::test] #[tokio::test]
async fn test_search_rejects_long_query() { async fn test_search_rejects_long_query() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -457,9 +445,8 @@ async fn test_search_rejects_long_query() {
#[tokio::test] #[tokio::test]
async fn test_search_rejects_empty_query() { async fn test_search_rejects_empty_query() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -486,9 +473,8 @@ async fn test_search_rejects_empty_query() {
#[tokio::test] #[tokio::test]
async fn test_search_whitespace_only_query() { async fn test_search_whitespace_only_query() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -514,9 +500,8 @@ async fn test_search_whitespace_only_query() {
#[tokio::test] #[tokio::test]
async fn test_builds_list_with_system_filter() { async fn test_builds_list_with_system_filter() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -544,9 +529,8 @@ async fn test_builds_list_with_system_filter() {
#[tokio::test] #[tokio::test]
async fn test_builds_list_with_job_name_filter() { async fn test_builds_list_with_job_name_filter() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -572,9 +556,8 @@ async fn test_builds_list_with_job_name_filter() {
#[tokio::test] #[tokio::test]
async fn test_builds_list_combined_filters() { async fn test_builds_list_combined_filters() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -595,9 +578,8 @@ async fn test_builds_list_combined_filters() {
#[tokio::test] #[tokio::test]
async fn test_cache_info_returns_correct_headers() { async fn test_cache_info_returns_correct_headers() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let mut config = fc_common::config::Config::default(); let mut config = fc_common::config::Config::default();
@ -631,9 +613,8 @@ async fn test_cache_info_returns_correct_headers() {
#[tokio::test] #[tokio::test]
async fn test_metrics_endpoint() { async fn test_metrics_endpoint() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -718,9 +699,8 @@ async fn test_metrics_endpoint() {
#[tokio::test] #[tokio::test]
async fn test_get_nonexistent_build_returns_error_code() { async fn test_get_nonexistent_build_returns_error_code() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -750,9 +730,8 @@ async fn test_get_nonexistent_build_returns_error_code() {
#[tokio::test] #[tokio::test]
async fn test_create_project_validation_rejects_invalid_name() { async fn test_create_project_validation_rejects_invalid_name() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -786,9 +765,8 @@ async fn test_create_project_validation_rejects_invalid_name() {
#[tokio::test] #[tokio::test]
async fn test_create_project_validation_rejects_bad_url() { async fn test_create_project_validation_rejects_bad_url() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -821,9 +799,8 @@ async fn test_create_project_validation_rejects_bad_url() {
#[tokio::test] #[tokio::test]
async fn test_create_project_validation_accepts_valid() { async fn test_create_project_validation_accepts_valid() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -854,14 +831,14 @@ async fn test_create_project_validation_accepts_valid() {
#[tokio::test] #[tokio::test]
async fn test_project_create_with_auth() { async fn test_project_create_with_auth() {
let pool = match get_pool().await { use sha2::Digest;
Some(p) => p,
None => return, let Some(pool) = get_pool().await else {
return;
}; };
// Create an admin API key // Create an admin API key
let mut hasher = sha2::Sha256::new(); let mut hasher = sha2::Sha256::new();
use sha2::Digest;
hasher.update(b"fc_test_project_auth"); hasher.update(b"fc_test_project_auth");
let key_hash = hex::encode(hasher.finalize()); let key_hash = hex::encode(hasher.finalize());
let _ = let _ =
@ -900,9 +877,8 @@ async fn test_project_create_with_auth() {
#[tokio::test] #[tokio::test]
async fn test_project_create_without_auth_rejected() { async fn test_project_create_without_auth_rejected() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -929,14 +905,14 @@ async fn test_project_create_without_auth_rejected() {
#[tokio::test] #[tokio::test]
async fn test_setup_endpoint_creates_project_and_jobsets() { async fn test_setup_endpoint_creates_project_and_jobsets() {
let pool = match get_pool().await { use sha2::Digest;
Some(p) => p,
None => return, let Some(pool) = get_pool().await else {
return;
}; };
// Create an admin API key // Create an admin API key
let mut hasher = sha2::Sha256::new(); let mut hasher = sha2::Sha256::new();
use sha2::Digest;
hasher.update(b"fc_test_setup_key"); hasher.update(b"fc_test_setup_key");
let key_hash = hex::encode(hasher.finalize()); let key_hash = hex::encode(hasher.finalize());
let _ = let _ =
@ -991,9 +967,8 @@ async fn test_setup_endpoint_creates_project_and_jobsets() {
#[tokio::test] #[tokio::test]
async fn test_security_headers_present() { async fn test_security_headers_present() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);
@ -1033,9 +1008,8 @@ async fn test_security_headers_present() {
#[tokio::test] #[tokio::test]
async fn test_static_css_served() { async fn test_static_css_served() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
let app = build_app(pool); let app = build_app(pool);

View file

@ -1,5 +1,5 @@
//! End-to-end integration test. //! End-to-end integration test.
//! Requires TEST_DATABASE_URL to be set. //! Requires `TEST_DATABASE_URL` to be set.
//! Tests the full flow: create project -> jobset -> evaluation -> builds. //! Tests the full flow: create project -> jobset -> evaluation -> builds.
//! //!
//! Nix-dependent steps are skipped if nix is not available. //! Nix-dependent steps are skipped if nix is not available.
@ -12,12 +12,9 @@ use fc_common::models::*;
use tower::ServiceExt; use tower::ServiceExt;
async fn get_pool() -> Option<sqlx::PgPool> { async fn get_pool() -> Option<sqlx::PgPool> {
let url = match std::env::var("TEST_DATABASE_URL") { let Ok(url) = std::env::var("TEST_DATABASE_URL") else {
Ok(url) => url,
Err(_) => {
println!("Skipping E2E test: TEST_DATABASE_URL not set"); println!("Skipping E2E test: TEST_DATABASE_URL not set");
return None; return None;
},
}; };
let pool = sqlx::postgres::PgPoolOptions::new() let pool = sqlx::postgres::PgPoolOptions::new()
@ -36,9 +33,8 @@ async fn get_pool() -> Option<sqlx::PgPool> {
#[tokio::test] #[tokio::test]
async fn test_e2e_project_eval_build_flow() { async fn test_e2e_project_eval_build_flow() {
let pool = match get_pool().await { let Some(pool) = get_pool().await else {
Some(p) => p, return;
None => return,
}; };
// 1. Create a project // 1. Create a project
@ -254,10 +250,10 @@ async fn test_e2e_project_eval_build_flow() {
assert_eq!(steps[0].exit_code, Some(0)); assert_eq!(steps[0].exit_code, Some(0));
// 14. Verify build stats reflect our changes // 14. Verify build stats reflect our changes
let stats = fc_common::repo::builds::get_stats(&pool) let build_stats = fc_common::repo::builds::get_stats(&pool)
.await .await
.expect("get stats"); .expect("get stats");
assert!(stats.completed_builds.unwrap_or(0) >= 2); assert!(build_stats.completed_builds.unwrap_or(0) >= 2);
// 15. Create a channel and verify it works // 15. Create a channel and verify it works
let channel = fc_common::repo::channels::create(&pool, CreateChannel { let channel = fc_common::repo::channels::create(&pool, CreateChannel {

View file

@ -16,10 +16,15 @@ performance.
## Executive Summary ## Executive Summary
FC currently implements more or less 50% of Hydra's core features, and has FC currently implements _more or less_ 50% of Hydra's core features, and has
various improvements over Hydra's architecture. As of writing, some gaps (such various improvements over Hydra's architecture. As of writing, some gaps (such
as the plugin architecture, VCS diversity and notification integrations) remain. as the plugin architecture, VCS diversity and notification integrations) remain.
As FC is currently in _heavy_ development, those gaps will remain for the
foreseeable future; however, most _critical_ functionality has already been
implemented. In any case, I believe FC has made good progress on the path to
becoming a "better Hydra".
### Strengths ### Strengths
- Modern Rust codebase with better error handling - Modern Rust codebase with better error handling
@ -35,7 +40,29 @@ as the plugin architecture, VCS diversity and notification integrations) remain.
- Limited VCS support (Git only in FC vs 6 types in Hydra) - Limited VCS support (Git only in FC vs 6 types in Hydra)
- No plugin architecture for extensibility - No plugin architecture for extensibility
- Missing several notification integrations (Slack, BitBucket, etc.) - Missing several notification integrations (Slack, BitBucket, etc.)
- No declarative project specification (coming soon)
- No coverage/build metrics collection
TODO: add a better comparison matrix ## Feature-by-Feature
### FC Server
The `fc-server` crate is the REST API server that powers FC. In addition to
support for full CRUD operations (on par with Hydra), FC exceeds Hydra in
several areas, such as log streaming, evaluation comparison, build actions, and
metrics visualization from the API. Below is a comparison table for the sake of
historical documentation and progress tracking:
| Feature | Hydra | FC | Status | Notes |
| ------------------------ | ---------------- | ------------------- | -------- | ---------------------------------- |
| **REST API Structure** | OpenAPI 3.0 spec | REST | Complete | FC has cleaner `/api/v1` structure |
| **Project Endpoints** | Full CRUD | Full CRUD | Complete | |
| **Jobset Endpoints** | Full CRUD | Full CRUD | Complete | FC has jobset inputs |
| **Build Endpoints** | Full | Full + actions | Complete | FC has cancel/restart/bump |
| **Evaluation Endpoints** | Basic | Full + trigger | Complete | FC has trigger + compare |
| **Search API** | Full search | Advanced search | Complete | Multi-entity, filters, sorting |
| **Channel API** | Management | Full CRUD | Complete | |
| **User API** | User management | Full CRUD + auth | Complete | |
| **Binary Cache API**     | NAR/manifest     | Full cache protocol | Complete |                                    |
| **Webhook API** | Push trigger | GitHub/Gitea | Complete | FC has HMAC verification |
| **Badge API** | Status badges | Implemented | Complete | Both support badges |
| **Metrics API** | Prometheus | Prometheus | Complete | Both expose metrics |
| **Log Streaming** | Polling only | SSE streaming | Complete | FC has Server-Sent Events |

View file

@ -223,7 +223,7 @@ development.
| `cache` | `secret_key_file` | none | Signing key for binary cache | | `cache` | `secret_key_file` | none | Signing key for binary cache |
| `signing` | `enabled` | `false` | Sign build outputs | | `signing` | `enabled` | `false` | Sign build outputs |
| `signing` | `key_file` | none | Signing key file path | | `signing` | `key_file` | none | Signing key file path |
| `notifications` | `run_command` | none | Command to run on build completion | | `notifications` | `webhook_url` | none | HTTP endpoint to POST build status JSON |
| `notifications` | `github_token` | none | GitHub token for commit status updates | | `notifications` | `github_token` | none | GitHub token for commit status updates |
| `notifications` | `gitea_url` | none | Gitea/Forgejo instance URL | | `notifications` | `gitea_url` | none | Gitea/Forgejo instance URL |
| `notifications` | `gitea_token` | none | Gitea/Forgejo API token | | `notifications` | `gitea_token` | none | Gitea/Forgejo API token |

View file

@ -1,6 +1,5 @@
# FC CI Configuration File # FC CI Configuration File
# This file contains default configuration for all FC CI components # This file contains default configuration for all FC CI components
[database] [database]
connect_timeout = 30 connect_timeout = 30
idle_timeout = 600 idle_timeout = 600
@ -16,6 +15,11 @@ max_body_size = 10485760 # 10MB
port = 3000 port = 3000
request_timeout = 30 request_timeout = 30
# Security options
# force_secure_cookies = true # enable when behind HTTPS reverse proxy (nginx/caddy)
# rate_limit_rps = 100 # requests per second per IP (prevents DoS)
# rate_limit_burst = 20 # burst size before rate limit enforcement
[evaluator] [evaluator]
allow_ifd = false allow_ifd = false
git_timeout = 600 git_timeout = 600

18
flake.lock generated
View file

@ -2,11 +2,11 @@
"nodes": { "nodes": {
"crane": { "crane": {
"locked": { "locked": {
"lastModified": 1771121070, "lastModified": 1772080396,
"narHash": "sha256-aIlv7FRXF9q70DNJPI237dEDAznSKaXmL5lfK/Id/bI=", "narHash": "sha256-84W9UNtSk9DNMh43WBkOjpkbfODlmg+RDi854PnNgLE=",
"owner": "ipetkov", "owner": "ipetkov",
"repo": "crane", "repo": "crane",
"rev": "a2812c19f1ed2e5ed5ce2ef7109798b575c180e1", "rev": "8525580bc0316c39dbfa18bd09a1331e98c9e463",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -17,11 +17,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1771177547, "lastModified": 1772082373,
"narHash": "sha256-trTtk3WTOHz7hSw89xIIvahkgoFJYQ0G43IlqprFoMA=", "narHash": "sha256-wySf8a6hvuqgFdwvvzPPTARBCMLDz7WFAufGkllD1M4=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "ac055f38c798b0d87695240c7b761b82fc7e5bc2", "rev": "26eaeac4e409d7b5a6bf6f90a2a2dc223c78d915",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -45,11 +45,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1771384185, "lastModified": 1772161521,
"narHash": "sha256-KvmjUeA7uODwzbcQoN/B8DCZIbhT/Q/uErF1BBMcYnw=", "narHash": "sha256-HmbcapTlcRqtryLUaJhH8t1mz6DaSJT+nxvWIl2bIPU=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "23dd7fa91602a68bd04847ac41bc10af1e6e2fd2", "rev": "9b2965450437541d25fde167d8bebfd01c156cef",
"type": "github" "type": "github"
}, },
"original": { "original": {

View file

@ -238,7 +238,7 @@
options = { options = {
notificationType = mkOption { notificationType = mkOption {
type = str; type = str;
description = "Notification type: github_status, email, gitlab_status, gitea_status, run_command."; description = "Notification type: github_status, email, gitlab_status, gitea_status, webhook.";
}; };
config = mkOption { config = mkOption {
type = settingsType; type = settingsType;

View file

@ -293,17 +293,19 @@ pkgs.testers.nixosTest {
f"{ro_header}" f"{ro_header}"
).strip() ).strip()
assert code == "403", f"Expected 403 for read-only input delete, got {code}" assert code == "403", f"Expected 403 for read-only input delete, got {code}"
# Clean up: admin deletes the temp input so it doesn't affect future
# inputs_hash computations and evaluator cache lookups
machine.succeed(
"curl -sf -o /dev/null "
f"-X DELETE http://127.0.0.1:3000/api/v1/projects/{e2e_project_id}/jobsets/{e2e_jobset_id}/inputs/{tmp_input_id} "
f"{auth_header}"
)
# Notifications are dispatched after builds complete (already tested above). with subtest("Build status is succeeded"):
# Verify run_command notifications work:
with subtest("Notification run_command is invoked on build completion"):
# This tests that the notification system dispatches properly.
# The actual run_command config is not set in this VM, so we just verify
# the build status was updated correctly after notification dispatch.
result = machine.succeed( result = machine.succeed(
f"curl -sf http://127.0.0.1:3000/api/v1/builds/{e2e_build_id} | jq -r .status" f"curl -sf http://127.0.0.1:3000/api/v1/builds/{e2e_build_id} | jq -r .status"
).strip() ).strip()
assert result == "succeeded", f"Expected succeeded after notification, got {result}" assert result == "succeeded", f"Expected succeeded, got {result}"
with subtest("Channel auto-promotion after all builds complete"): with subtest("Channel auto-promotion after all builds complete"):
# Create a channel tracking the E2E jobset # Create a channel tracking the E2E jobset
@ -388,34 +390,43 @@ pkgs.testers.nixosTest {
timeout=10 timeout=10
) )
with subtest("Notification run_command invoked on build completion"): with subtest("Webhook notification fires on build completion"):
# Write a notification script # Start a minimal HTTP server on the VM to receive the webhook POST.
machine.succeed("mkdir -p /var/lib/fc") # Writes the request body to /tmp/webhook.json so we can inspect it.
machine.succeed(""" machine.succeed(
cat > /var/lib/fc/notify.sh << 'SCRIPT' "cat > /tmp/webhook-server.py << 'PYEOF'\n"
#!/bin/sh "import http.server, json\n"
echo "BUILD_STATUS=$FC_BUILD_STATUS" >> /var/lib/fc/notify-output "class H(http.server.BaseHTTPRequestHandler):\n"
echo "BUILD_ID=$FC_BUILD_ID" >> /var/lib/fc/notify-output " def do_POST(self):\n"
echo "BUILD_JOB=$FC_BUILD_JOB" >> /var/lib/fc/notify-output " n = int(self.headers.get('Content-Length', 0))\n"
SCRIPT " body = self.rfile.read(n)\n"
""") " open('/tmp/webhook.json', 'wb').write(body)\n"
machine.succeed("chmod +x /var/lib/fc/notify.sh") " self.send_response(200)\n"
machine.succeed("chown -R fc:fc /var/lib/fc") " self.end_headers()\n"
" def log_message(self, *a): pass\n"
"http.server.HTTPServer(('127.0.0.1', 9998), H).serve_forever()\n"
"PYEOF\n"
)
machine.succeed("python3 /tmp/webhook-server.py &")
machine.wait_until_succeeds(
"curl -sf -X POST -H 'Content-Length: 2' -d '{}' http://127.0.0.1:9998/",
timeout=10
)
machine.succeed("rm -f /tmp/webhook.json")
# Enable notifications via systemd drop-in override (adds env var directly to service unit) # Configure queue-runner to send webhook notifications
machine.succeed("mkdir -p /run/systemd/system/fc-queue-runner.service.d") machine.succeed("mkdir -p /run/systemd/system/fc-queue-runner.service.d")
machine.succeed(""" machine.succeed(
cat > /run/systemd/system/fc-queue-runner.service.d/notify.conf << 'EOF' "cat > /run/systemd/system/fc-queue-runner.service.d/webhook.conf << 'EOF'\n"
[Service] "[Service]\n"
Environment=FC_NOTIFICATIONS__RUN_COMMAND=/var/lib/fc/notify.sh "Environment=FC_NOTIFICATIONS__WEBHOOK_URL=http://127.0.0.1:9998/notify\n"
EOF "EOF\n"
""") )
machine.succeed("systemctl daemon-reload") machine.succeed("systemctl daemon-reload")
machine.succeed("systemctl restart fc-queue-runner") machine.succeed("systemctl restart fc-queue-runner")
machine.wait_for_unit("fc-queue-runner.service", timeout=30) machine.wait_for_unit("fc-queue-runner.service", timeout=30)
# Create a new simple build to trigger notification # Push a new commit to trigger a fresh evaluation and build
# Push a trivial change to trigger a new evaluation
machine.succeed( machine.succeed(
"cd /tmp/test-flake-work && \\\n" "cd /tmp/test-flake-work && \\\n"
"cat > flake.nix << 'FLAKE'\n" "cat > flake.nix << 'FLAKE'\n"
@ -432,31 +443,16 @@ pkgs.testers.nixosTest {
"}\n" "}\n"
"FLAKE\n" "FLAKE\n"
) )
machine.succeed("cd /tmp/test-flake-work && git add -A && git commit -m 'trigger notification test'") machine.succeed("cd /tmp/test-flake-work && git add -A && git commit -m 'trigger webhook notification test'")
machine.succeed("cd /tmp/test-flake-work && git push origin HEAD:refs/heads/master") machine.succeed("cd /tmp/test-flake-work && git push origin HEAD:refs/heads/master")
# Wait for the notify-test build to succeed # Wait for the webhook to arrive (the build must complete first)
machine.wait_until_succeeds( machine.wait_until_succeeds("test -e /tmp/webhook.json", timeout=120)
"curl -sf 'http://127.0.0.1:3000/api/v1/builds?job_name=notify-test' " payload = json.loads(machine.succeed("cat /tmp/webhook.json"))
"| jq -e '.items[] | select(.status==\"succeeded\")'", assert payload["build_status"] == "success", \
timeout=120 f"Expected build_status=success in webhook payload, got: {payload}"
) assert "build_id" in payload, f"Missing build_id in webhook payload: {payload}"
assert "build_job" in payload, f"Missing build_job in webhook payload: {payload}"
# Get the build ID
notify_build_id = machine.succeed(
"curl -sf 'http://127.0.0.1:3000/api/v1/builds?job_name=notify-test' "
"| jq -r '.items[] | select(.status==\"succeeded\") | .id' | head -1"
).strip()
# Wait a bit for notification to dispatch
time.sleep(5)
# Verify the notification script was executed
machine.wait_for_file("/var/lib/fc/notify-output")
output = machine.succeed("cat /var/lib/fc/notify-output")
assert "BUILD_STATUS=success" in output, \
f"Expected BUILD_STATUS=success in notification output, got: {output}"
assert notify_build_id in output, f"Expected build ID {notify_build_id} in output, got: {output}"
with subtest("Generate signing key and configure signing"): with subtest("Generate signing key and configure signing"):
# Generate a Nix signing key # Generate a Nix signing key
@ -467,13 +463,13 @@ pkgs.testers.nixosTest {
# Enable signing via systemd drop-in override # Enable signing via systemd drop-in override
machine.succeed("mkdir -p /run/systemd/system/fc-queue-runner.service.d") machine.succeed("mkdir -p /run/systemd/system/fc-queue-runner.service.d")
machine.succeed(""" machine.succeed(
cat > /run/systemd/system/fc-queue-runner.service.d/signing.conf << 'EOF' "cat > /run/systemd/system/fc-queue-runner.service.d/signing.conf << 'EOF'\n"
[Service] "[Service]\n"
Environment=FC_SIGNING__ENABLED=true "Environment=FC_SIGNING__ENABLED=true\n"
Environment=FC_SIGNING__KEY_FILE=/var/lib/fc/keys/signing-key "Environment=FC_SIGNING__KEY_FILE=/var/lib/fc/keys/signing-key\n"
EOF "EOF\n"
""") )
machine.succeed("systemctl daemon-reload") machine.succeed("systemctl daemon-reload")
machine.succeed("systemctl restart fc-queue-runner") machine.succeed("systemctl restart fc-queue-runner")
machine.wait_for_unit("fc-queue-runner.service", timeout=30) machine.wait_for_unit("fc-queue-runner.service", timeout=30)
@ -530,15 +526,15 @@ pkgs.testers.nixosTest {
with subtest("GC roots are created for build products"): with subtest("GC roots are created for build products"):
# Enable GC via systemd drop-in override # Enable GC via systemd drop-in override
machine.succeed("mkdir -p /run/systemd/system/fc-queue-runner.service.d") machine.succeed("mkdir -p /run/systemd/system/fc-queue-runner.service.d")
machine.succeed(""" machine.succeed(
cat > /run/systemd/system/fc-queue-runner.service.d/gc.conf << 'EOF' "cat > /run/systemd/system/fc-queue-runner.service.d/gc.conf << 'EOF'\n"
[Service] "[Service]\n"
Environment=FC_GC__ENABLED=true "Environment=FC_GC__ENABLED=true\n"
Environment=FC_GC__GC_ROOTS_DIR=/nix/var/nix/gcroots/per-user/fc "Environment=FC_GC__GC_ROOTS_DIR=/nix/var/nix/gcroots/per-user/fc\n"
Environment=FC_GC__MAX_AGE_DAYS=30 "Environment=FC_GC__MAX_AGE_DAYS=30\n"
Environment=FC_GC__CLEANUP_INTERVAL=3600 "Environment=FC_GC__CLEANUP_INTERVAL=3600\n"
EOF "EOF\n"
""") )
machine.succeed("systemctl daemon-reload") machine.succeed("systemctl daemon-reload")
machine.succeed("systemctl restart fc-queue-runner") machine.succeed("systemctl restart fc-queue-runner")
machine.wait_for_unit("fc-queue-runner.service", timeout=30) machine.wait_for_unit("fc-queue-runner.service", timeout=30)
@ -599,7 +595,6 @@ pkgs.testers.nixosTest {
) )
# Wait for a symlink pointing to our build output to appear # Wait for a symlink pointing to our build output to appear
import time
found = False found = False
for _ in range(10): for _ in range(10):
if wait_for_gc_root(): if wait_for_gc_root():
@ -612,16 +607,16 @@ pkgs.testers.nixosTest {
with subtest("Declarative .fc.toml in repo auto-creates jobset"): with subtest("Declarative .fc.toml in repo auto-creates jobset"):
# Add .fc.toml to the test repo with a new jobset definition # Add .fc.toml to the test repo with a new jobset definition
machine.succeed(""" machine.succeed(
cd /tmp/test-flake-work && \ "cd /tmp/test-flake-work && "
cat > .fc.toml << 'FCTOML' "cat > .fc.toml << 'FCTOML'\n"
[[jobsets]] "[[jobsets]]\n"
name = "declarative-checks" 'name = "declarative-checks"\n'
nix_expression = "checks" 'nix_expression = "checks"\n'
flake_mode = true "flake_mode = true\n"
enabled = true "enabled = true\n"
FCTOML "FCTOML\n"
""") )
machine.succeed("cd /tmp/test-flake-work && git add -A && git commit -m 'add declarative config'") machine.succeed("cd /tmp/test-flake-work && git add -A && git commit -m 'add declarative config'")
machine.succeed("cd /tmp/test-flake-work && git push origin HEAD:refs/heads/master") machine.succeed("cd /tmp/test-flake-work && git push origin HEAD:refs/heads/master")