treewide: general cleanup

Finally had the time to clean up after myself. Does a bunch of things,
without breakage as far as I'm aware. I've removed around 20 unnecessary
clones, and simplified the architecture a little bit.

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I4d22337b997a3bf5b0593e6068cd1bd86a6a6964
This commit is contained in:
raf 2026-02-27 21:27:57 +03:00
commit a1357b2501
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
6 changed files with 87 additions and 86 deletions

View file

@ -24,12 +24,6 @@ pub struct Fetcher {
shelve: bool, shelve: bool,
} }
pub struct FileFetcher {
client: Client,
base_path: PathBuf,
shelve: bool,
}
impl Fetcher { impl Fetcher {
pub fn new<P: AsRef<Path>>(base_path: P) -> Self { pub fn new<P: AsRef<Path>>(base_path: P) -> Self {
Self { Self {
@ -44,25 +38,10 @@ impl Fetcher {
self self
} }
pub async fn fetch_all(
&self,
lockfile: &LockFile,
config: &Config,
) -> Result<()> {
let fetcher = FileFetcher {
client: self.client.clone(),
base_path: self.base_path.clone(),
shelve: self.shelve,
};
fetcher.fetch_all(lockfile, config).await
}
pub async fn sync(&self, lockfile: &LockFile, config: &Config) -> Result<()> { pub async fn sync(&self, lockfile: &LockFile, config: &Config) -> Result<()> {
self.fetch_all(lockfile, config).await self.fetch_all(lockfile, config).await
} }
}
impl FileFetcher {
/// Fetch all project files according to lockfile with parallel downloads /// Fetch all project files according to lockfile with parallel downloads
pub async fn fetch_all( pub async fn fetch_all(
&self, &self,
@ -94,14 +73,14 @@ impl FileFetcher {
let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_DOWNLOADS)); let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_DOWNLOADS));
// Prepare download tasks // Prepare download tasks
let client = &self.client;
let base_path = &self.base_path;
let download_tasks: Vec<_> = exportable_projects let download_tasks: Vec<_> = exportable_projects
.iter() .iter()
.map(|project| { .map(|project| {
let semaphore = Arc::clone(&semaphore); let semaphore = Arc::clone(&semaphore);
let client = self.client.clone(); let client = client.clone();
let base_path = self.base_path.clone(); let base_path = base_path.clone();
let lockfile = lockfile.clone();
let config = config.clone();
let project = (*project).clone(); let project = (*project).clone();
let overall_bar = overall_bar.clone(); let overall_bar = overall_bar.clone();
@ -111,11 +90,7 @@ impl FileFetcher {
PakkerError::InternalError("Semaphore acquisition failed".into()) PakkerError::InternalError("Semaphore acquisition failed".into())
})?; })?;
let name = project let name = project.get_name();
.name
.values()
.next()
.map_or("unknown".to_string(), std::clone::Clone::clone);
let fetcher = Self { let fetcher = Self {
client, client,
@ -123,8 +98,7 @@ impl FileFetcher {
shelve: false, // Shelving happens at sync level, not per-project shelve: false, // Shelving happens at sync level, not per-project
}; };
let result = let result = fetcher.fetch_project(&project, lockfile, config).await;
fetcher.fetch_project(&project, &lockfile, &config).await;
// Update progress bar // Update progress bar
overall_bar.inc(1); overall_bar.inc(1);

View file

@ -2,6 +2,12 @@ use std::time::Duration;
use reqwest::Client; use reqwest::Client;
/// Create HTTP client with optimized settings for API requests.
///
/// # Panics
///
/// Panics if the HTTP client cannot be built, which should only happen in
/// extreme cases like OOM or broken TLS configuration.
pub fn create_http_client() -> Client { pub fn create_http_client() -> Client {
Client::builder() Client::builder()
.pool_max_idle_per_host(10) .pool_max_idle_per_host(10)
@ -12,5 +18,8 @@ pub fn create_http_client() -> Client {
.timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(30))
.user_agent("Pakker/0.1.0") .user_agent("Pakker/0.1.0")
.build() .build()
.expect("Failed to build HTTP client") .expect(
"Failed to build HTTP client - this should never happen unless system \
resources are exhausted",
)
} }

View file

@ -153,17 +153,18 @@ impl IpcCoordinator {
let ipc_dir = ipc_base.join(&modpack_hash); let ipc_dir = ipc_base.join(&modpack_hash);
// Create IPC directory with restricted permissions // Create IPC directory with restricted permissions
if let Err(e) = fs::create_dir_all(&ipc_dir) fs::create_dir_all(&ipc_dir).or_else(|e| {
&& !ipc_dir.exists() if ipc_dir.exists() {
{ Ok(())
return Err(IpcError::IpcDirCreationFailed(e.to_string())); } else {
} Err(IpcError::IpcDirCreationFailed(e.to_string()))
}
})?;
if ipc_dir.exists() { // Set permissions to 700 (owner only)
// Set permissions to 700 (owner only) if let Ok(metadata) = fs::metadata(&ipc_dir) {
if let Ok(metadata) = fs::metadata(&ipc_dir) let current_mode = metadata.permissions().mode() & 0o777;
&& metadata.permissions().mode() != 0o700 if current_mode != 0o700 {
{
let mut perms = metadata.permissions(); let mut perms = metadata.permissions();
perms.set_mode(0o700); perms.set_mode(0o700);
let _ = fs::set_permissions(&ipc_dir, perms); let _ = fs::set_permissions(&ipc_dir, perms);

View file

@ -59,15 +59,15 @@ async fn main() -> Result<(), PakkerError> {
}, },
Commands::AddPrj(args) => { Commands::AddPrj(args) => {
cli::commands::add_prj::execute( cli::commands::add_prj::execute(
args.curseforge.clone(), args.curseforge,
args.modrinth.clone(), args.modrinth,
args.github.clone(), args.github,
args.project_type, args.project_type,
args.side, args.side,
args.strategy, args.strategy,
args.redistributable, args.redistributable,
args.subpath.clone(), args.subpath,
args.aliases.clone(), args.aliases,
args.export, args.export,
args.no_deps, args.no_deps,
args.yes, args.yes,
@ -121,12 +121,12 @@ async fn main() -> Result<(), PakkerError> {
.await .await
}, },
Commands::Credentials(args) => { Commands::Credentials(args) => {
match &args.subcommand { match args.subcommand {
Some(cli::CredentialsSubcommand::Set(set_args)) => { Some(cli::CredentialsSubcommand::Set(set_args)) => {
cli::commands::credentials_set::execute( cli::commands::credentials_set::execute(
set_args.cf_api_key.clone(), set_args.cf_api_key,
set_args.modrinth_token.clone(), set_args.modrinth_token,
set_args.gh_access_token.clone(), set_args.gh_access_token,
) )
}, },
None => { None => {
@ -139,34 +139,34 @@ async fn main() -> Result<(), PakkerError> {
} }
}, },
Commands::Cfg(args) => { Commands::Cfg(args) => {
match &args.subcommand { match args.subcommand {
Some(cli::CfgSubcommand::Prj(prj_args)) => { Some(cli::CfgSubcommand::Prj(prj_args)) => {
cli::commands::cfg_prj::execute( cli::commands::cfg_prj::execute(
&config_path, &config_path,
&lockfile_path, &lockfile_path,
prj_args.project.clone(), prj_args.project,
prj_args.r#type.as_deref(), prj_args.r#type.as_deref(),
prj_args.side.as_deref(), prj_args.side.as_deref(),
prj_args.update_strategy.as_deref(), prj_args.update_strategy.as_deref(),
prj_args.redistributable, prj_args.redistributable,
prj_args.subpath.clone(), prj_args.subpath,
prj_args.add_alias.clone(), prj_args.add_alias,
prj_args.remove_alias.clone(), prj_args.remove_alias,
prj_args.export, prj_args.export,
) )
}, },
None => { None => {
cli::commands::cfg::execute( cli::commands::cfg::execute(
&config_path, &config_path,
args.name.clone(), args.name,
args.version.clone(), args.version,
args.description.clone(), args.description,
args.author.clone(), args.author,
args.mods_path.clone(), args.mods_path,
args.resource_packs_path.clone(), args.resource_packs_path,
args.data_packs_path.clone(), args.data_packs_path,
args.worlds_path.clone(), args.worlds_path,
args.shaders_path.clone(), args.shaders_path,
) )
}, },
} }

View file

@ -91,12 +91,13 @@ impl Project {
} }
pub fn get_name(&self) -> String { pub fn get_name(&self) -> String {
self.name.values().next().cloned().unwrap_or_else(|| { self
self .name
.pakku_id .values()
.clone() .next()
.unwrap_or_else(|| "unknown".to_string()) .map(|s| s.to_owned())
}) .or_else(|| self.pakku_id.as_ref().map(|s| s.to_owned()))
.unwrap_or_else(|| "unknown".to_string())
} }
pub fn matches_input(&self, input: &str) -> bool { pub fn matches_input(&self, input: &str) -> bool {
@ -145,10 +146,10 @@ impl Project {
pub fn merge(&mut self, other: Self) { pub fn merge(&mut self, other: Self) {
// Merge platform identifiers // Merge platform identifiers
for (platform, id) in other.id { for (platform, id) in other.id {
self.id.entry(platform.clone()).or_insert(id); self.id.entry(platform).or_insert(id);
} }
for (platform, slug) in other.slug { for (platform, slug) in other.slug {
self.slug.entry(platform.clone()).or_insert(slug); self.slug.entry(platform).or_insert(slug);
} }
for (platform, name) in other.name { for (platform, name) in other.name {
self.name.entry(platform).or_insert(name); self.name.entry(platform).or_insert(name);

View file

@ -56,18 +56,34 @@ impl RateLimiter {
} }
pub async fn acquire(&self, platform: &str) -> Result<()> { pub async fn acquire(&self, platform: &str) -> Result<()> {
let config = { let (rate, burst) = {
let inner = self.inner.lock().await; let inner = self.inner.lock().await;
inner.config.clone() match platform.to_lowercase().as_str() {
}; "modrinth" => {
(
let (rate, burst) = match platform.to_lowercase().as_str() { inner.config.modrinth_requests_per_min,
"modrinth" => (config.modrinth_requests_per_min, config.modrinth_burst), inner.config.modrinth_burst,
"curseforge" => { )
(config.curseforge_requests_per_min, config.curseforge_burst) },
}, "curseforge" => {
"github" => (config.github_requests_per_min, config.github_burst), (
_ => (config.default_requests_per_min, config.default_burst), inner.config.curseforge_requests_per_min,
inner.config.curseforge_burst,
)
},
"github" => {
(
inner.config.github_requests_per_min,
inner.config.github_burst,
)
},
_ => {
(
inner.config.default_requests_per_min,
inner.config.default_burst,
)
},
}
}; };
let interval = Duration::from_secs(60) / rate.max(1); let interval = Duration::from_secs(60) / rate.max(1);
@ -76,7 +92,7 @@ impl RateLimiter {
let mut inner = self.inner.lock().await; let mut inner = self.inner.lock().await;
let now = Instant::now(); let now = Instant::now();
let platform_requests = let platform_requests =
inner.requests.entry(platform.to_string()).or_default(); inner.requests.entry(platform.to_owned()).or_default();
platform_requests platform_requests
.retain(|t| now.duration_since(*t) < Duration::from_secs(60)); .retain(|t| now.duration_since(*t) < Duration::from_secs(60));