From 75ff45fc912f5fcfc3fc627cd1df571f99233ee4 Mon Sep 17 00:00:00 2001
From: NotAShelf
Date: Sat, 14 Feb 2026 01:38:24 +0300
Subject: [PATCH] various: initial support for S3 cache upload

Not too stable yet, but might work.

Signed-off-by: NotAShelf
Change-Id: If134e7e45aa99ce8d18df7b78b1f881b6a6a6964
---
 Cargo.lock                        |  1 +
 crates/common/src/config.rs       | 47 ++++++++++++++++++++++-
 crates/queue-runner/Cargo.toml    |  1 +
 crates/queue-runner/src/worker.rs | 64 +++++++++++++++++++++++++++++--
 4 files changed, 109 insertions(+), 4 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3cd1a2c..6c1721f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -884,6 +884,7 @@ dependencies = [
  "tokio-util",
  "tracing",
  "tracing-subscriber",
+ "urlencoding",
  "uuid",
 ]
 
diff --git a/crates/common/src/config.rs b/crates/common/src/config.rs
index ead09ea..aaf420f 100644
--- a/crates/common/src/config.rs
+++ b/crates/common/src/config.rs
@@ -167,10 +167,55 @@ pub struct SigningConfig {
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(default)]
-#[derive(Default)]
 pub struct CacheUploadConfig {
     pub enabled: bool,
     pub store_uri: Option<String>,
+    /// S3-specific configuration (used when store_uri starts with s3://)
+    pub s3: Option<S3CacheConfig>,
 }
+
+/// S3-specific cache configuration.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct S3CacheConfig {
+    /// AWS region (e.g., "us-east-1")
+    pub region: Option<String>,
+    /// Path prefix within the bucket (e.g., "nix-cache/")
+    pub prefix: Option<String>,
+    /// AWS access key ID (optional - uses IAM role if not provided)
+    pub access_key_id: Option<String>,
+    /// AWS secret access key (optional - uses IAM role if not provided)
+    pub secret_access_key: Option<String>,
+    /// Session token for temporary credentials (optional)
+    pub session_token: Option<String>,
+    /// Endpoint URL for S3-compatible services (e.g., MinIO)
+    pub endpoint_url: Option<String>,
+    /// Whether to use path-style addressing (for MinIO compatibility)
+    pub use_path_style: bool,
+}
+
+impl Default for S3CacheConfig {
+    fn default() -> Self {
+        Self {
+            region: None,
+            prefix: None,
+            access_key_id: None,
+            secret_access_key: None,
+            session_token: None,
+            endpoint_url: None,
+            use_path_style: false,
+        }
+    }
+}
+
+impl Default for CacheUploadConfig {
+    fn default() -> Self {
+        Self {
+            enabled: false,
+            store_uri: None,
+            s3: None,
+        }
+    }
+}
 
 /// Declarative project/jobset/api-key/user definitions.
diff --git a/crates/queue-runner/Cargo.toml b/crates/queue-runner/Cargo.toml
index d6bd929..f169876 100644
--- a/crates/queue-runner/Cargo.toml
+++ b/crates/queue-runner/Cargo.toml
@@ -19,6 +19,7 @@ tokio.workspace = true
 tokio-util.workspace = true
 tracing.workspace = true
 tracing-subscriber.workspace = true
+urlencoding.workspace = true
 uuid.workspace = true
 
 # Our crates
diff --git a/crates/queue-runner/src/worker.rs b/crates/queue-runner/src/worker.rs
index 34972e3..099ec9c 100644
--- a/crates/queue-runner/src/worker.rs
+++ b/crates/queue-runner/src/worker.rs
@@ -215,10 +215,22 @@ async fn sign_outputs(
 }
 
 /// Push output paths to an external binary cache via `nix copy`.
-async fn push_to_cache(output_paths: &[String], store_uri: &str) {
+/// Supports S3 URIs with proper credential handling.
+async fn push_to_cache(
+    output_paths: &[String],
+    store_uri: &str,
+    s3_config: Option<&fc_common::config::S3CacheConfig>,
+) {
+    // Build the full store URI with S3 options if applicable
+    let full_store_uri = if store_uri.starts_with("s3://") {
+        build_s3_store_uri(store_uri, s3_config)
+    } else {
+        store_uri.to_string()
+    };
+
     for path in output_paths {
         let result = tokio::process::Command::new("nix")
-            .args(["copy", "--to", store_uri, path])
+            .args(["copy", "--to", &full_store_uri, path])
             .output()
             .await;
         match result {
@@ -240,6 +252,47 @@
     }
 }
 
+/// Build S3 store URI with configuration options.
+/// Nix S3 URIs support query parameters for configuration:
+/// s3://bucket?region=us-east-1&endpoint=https://minio.example.com
+fn build_s3_store_uri(
+    base_uri: &str,
+    config: Option<&fc_common::config::S3CacheConfig>,
+) -> String {
+    let Some(cfg) = config else {
+        return base_uri.to_string();
+    };
+
+    let mut params: Vec<(String, String)> = Vec::new();
+
+    if let Some(region) = &cfg.region {
+        params.push(("region".to_string(), region.clone()));
+    }
+
+    if let Some(endpoint) = &cfg.endpoint_url {
+        params.push(("endpoint".to_string(), endpoint.clone()));
+    }
+
+    if cfg.use_path_style {
+        params.push(("use-path-style".to_string(), "true".to_string()));
+    }
+
+    if params.is_empty() {
+        return base_uri.to_string();
+    }
+
+    // Build URI with query parameters
+    let query = params
+        .iter()
+        .map(|(k, v)| {
+            format!("{}={}", urlencoding::encode(k), urlencoding::encode(v))
+        })
+        .collect::<Vec<_>>()
+        .join("&");
+
+    format!("{}?{}", base_uri, query)
+}
+
 /// Try to run the build on a remote builder if one is available for the build's
 /// system.
 async fn try_remote_build(
@@ -551,7 +604,12 @@ async fn run_build(
     if cache_upload_config.enabled
         && let Some(ref store_uri) = cache_upload_config.store_uri
     {
-        push_to_cache(&build_result.output_paths, store_uri).await;
+        push_to_cache(
+            &build_result.output_paths,
+            store_uri,
+            cache_upload_config.s3.as_ref(),
+        )
+        .await;
     }
 
     let primary_output = build_result