various: initial support for S3 cache upload
Not too stable yet, but might work. Signed-off-by: NotAShelf <raf@notashelf.dev> Change-Id: If134e7e45aa99ce8d18df7b78b1f881b6a6a6964
This commit is contained in:
parent
fcb32aa9be
commit
75ff45fc91
4 changed files with 109 additions and 4 deletions
1
Cargo.lock
generated
1
Cargo.lock
generated
|
|
@ -884,6 +884,7 @@ dependencies = [
|
|||
"tokio-util",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"urlencoding",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
|
|
|
|||
|
|
@ -167,10 +167,55 @@ pub struct SigningConfig {
|
|||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
#[derive(Default)]
|
||||
pub struct CacheUploadConfig {
|
||||
pub enabled: bool,
|
||||
pub store_uri: Option<String>,
|
||||
/// S3-specific configuration (used when store_uri starts with s3://)
|
||||
pub s3: Option<S3CacheConfig>,
|
||||
}
|
||||
|
||||
/// S3-specific cache configuration.
///
/// Every field is optional; a `None` simply means the setting is not passed
/// along. Of these, only `region`, `endpoint_url` and `use_path_style` are
/// encoded into the store URI by `build_s3_store_uri`; the credential fields
/// are not embedded in the URI.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct S3CacheConfig {
    /// AWS region (e.g., "us-east-1")
    pub region: Option<String>,
    /// Path prefix within the bucket (e.g., "nix-cache/")
    // NOTE(review): `prefix` is not consumed by `build_s3_store_uri` —
    // confirm where (or whether) it is applied.
    pub prefix: Option<String>,
    /// AWS access key ID (optional - uses IAM role if not provided)
    pub access_key_id: Option<String>,
    /// AWS secret access key (optional - uses IAM role if not provided)
    pub secret_access_key: Option<String>,
    /// Session token for temporary credentials (optional)
    pub session_token: Option<String>,
    /// Endpoint URL for S3-compatible services (e.g., MinIO)
    pub endpoint_url: Option<String>,
    /// Whether to use path-style addressing (for MinIO compatibility)
    pub use_path_style: bool,
}
|
||||
|
||||
impl Default for S3CacheConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
region: None,
|
||||
prefix: None,
|
||||
access_key_id: None,
|
||||
secret_access_key: None,
|
||||
session_token: None,
|
||||
endpoint_url: None,
|
||||
use_path_style: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CacheUploadConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
store_uri: None,
|
||||
s3: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Declarative project/jobset/api-key/user definitions.
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ tokio.workspace = true
|
|||
tokio-util.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
urlencoding.workspace = true
|
||||
uuid.workspace = true
|
||||
|
||||
# Our crates
|
||||
|
|
|
|||
|
|
@ -215,10 +215,22 @@ async fn sign_outputs(
|
|||
}
|
||||
|
||||
/// Push output paths to an external binary cache via `nix copy`.
|
||||
async fn push_to_cache(output_paths: &[String], store_uri: &str) {
|
||||
/// Supports S3 URIs with proper credential handling.
|
||||
async fn push_to_cache(
|
||||
output_paths: &[String],
|
||||
store_uri: &str,
|
||||
s3_config: Option<&fc_common::config::S3CacheConfig>,
|
||||
) {
|
||||
// Build the full store URI with S3 options if applicable
|
||||
let full_store_uri = if store_uri.starts_with("s3://") {
|
||||
build_s3_store_uri(store_uri, s3_config)
|
||||
} else {
|
||||
store_uri.to_string()
|
||||
};
|
||||
|
||||
for path in output_paths {
|
||||
let result = tokio::process::Command::new("nix")
|
||||
.args(["copy", "--to", store_uri, path])
|
||||
.args(["copy", "--to", &full_store_uri, path])
|
||||
.output()
|
||||
.await;
|
||||
match result {
|
||||
|
|
@ -240,6 +252,47 @@ async fn push_to_cache(output_paths: &[String], store_uri: &str) {
|
|||
}
|
||||
}
|
||||
|
||||
/// Build S3 store URI with configuration options.
|
||||
/// Nix S3 URIs support query parameters for configuration:
|
||||
/// s3://bucket?region=us-east-1&endpoint=https://minio.example.com
|
||||
fn build_s3_store_uri(
|
||||
base_uri: &str,
|
||||
config: Option<&fc_common::config::S3CacheConfig>,
|
||||
) -> String {
|
||||
let Some(cfg) = config else {
|
||||
return base_uri.to_string();
|
||||
};
|
||||
|
||||
let mut params: Vec<(String, String)> = Vec::new();
|
||||
|
||||
if let Some(region) = &cfg.region {
|
||||
params.push(("region".to_string(), region.clone()));
|
||||
}
|
||||
|
||||
if let Some(endpoint) = &cfg.endpoint_url {
|
||||
params.push(("endpoint".to_string(), endpoint.clone()));
|
||||
}
|
||||
|
||||
if cfg.use_path_style {
|
||||
params.push(("use-path-style".to_string(), "true".to_string()));
|
||||
}
|
||||
|
||||
if params.is_empty() {
|
||||
return base_uri.to_string();
|
||||
}
|
||||
|
||||
// Build URI with query parameters
|
||||
let query = params
|
||||
.iter()
|
||||
.map(|(k, v)| {
|
||||
format!("{}={}", urlencoding::encode(k), urlencoding::encode(v))
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.join("&");
|
||||
|
||||
format!("{}?{}", base_uri, query)
|
||||
}
|
||||
|
||||
/// Try to run the build on a remote builder if one is available for the build's
|
||||
/// system.
|
||||
async fn try_remote_build(
|
||||
|
|
@ -551,7 +604,12 @@ async fn run_build(
|
|||
if cache_upload_config.enabled
|
||||
&& let Some(ref store_uri) = cache_upload_config.store_uri
|
||||
{
|
||||
push_to_cache(&build_result.output_paths, store_uri).await;
|
||||
push_to_cache(
|
||||
&build_result.output_paths,
|
||||
store_uri,
|
||||
cache_upload_config.s3.as_ref(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
let primary_output = build_result
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue