Compare commits


No commits in common. "main" and "v0.3.6" have entirely different histories.

25 changed files with 334 additions and 3024 deletions


@@ -1,4 +0,0 @@
# https://github.com/rui314/mold?tab=readme-ov-file#how-to-use
[target.'cfg(target_os = "linux")']
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=mold"]

Binary image file changed; not shown (20 KiB before, 32 KiB after).


@@ -1,14 +0,0 @@
version: 2
updates:
# Update Cargo deps
- package-ecosystem: cargo
directory: "/"
schedule:
interval: "weekly"
# Update used workflows
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily


@@ -1,62 +0,0 @@
name: hotpath-comment
on:
workflow_run:
workflows: ["hotpath-profile"]
types:
- completed
permissions:
contents: read
pull-requests: write
jobs:
comment:
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'success' }}
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Download profiling results
uses: actions/download-artifact@v6
with:
name: hotpath-results
path: /tmp/
github-token: ${{ secrets.GITHUB_TOKEN }}
run-id: ${{ github.event.workflow_run.id }}
- name: Setup Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install hotpath CLI
run: cargo install hotpath
- name: Post timing comparison comment
env:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
HEAD_METRICS=$(cat /tmp/head_timing.json)
BASE_METRICS=$(cat /tmp/base_timing.json)
PR_NUMBER=$(cat /tmp/pr_number.txt)
hotpath profile-pr \
--head-metrics "$HEAD_METRICS" \
--base-metrics "$BASE_METRICS" \
--github-token "$GH_TOKEN" \
--pr-number "$PR_NUMBER"
- name: Post allocation comparison comment
env:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
HEAD_METRICS=$(cat /tmp/head_alloc.json)
BASE_METRICS=$(cat /tmp/base_alloc.json)
PR_NUMBER=$(cat /tmp/pr_number.txt)
hotpath profile-pr \
--head-metrics "$HEAD_METRICS" \
--base-metrics "$BASE_METRICS" \
--github-token "$GH_TOKEN" \
--pr-number "$PR_NUMBER"


@@ -1,65 +0,0 @@
name: hotpath-profile
on:
pull_request:
branches: [ "main" ]
env:
CARGO_TERM_COLOR: always
jobs:
profile:
runs-on: ubuntu-latest
steps:
- name: Checkout PR HEAD
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Setup Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Run timing profiling on HEAD
env:
HOTPATH_JSON: "true"
run: |
cargo run --features='hotpath' 2>&1 | grep '^{"hotpath_profiling_mode"' > /tmp/head_timing.json
- name: Run allocation profiling on HEAD
env:
HOTPATH_JSON: "true"
run: |
cargo run --features='hotpath,hotpath-alloc' 2>&1 | grep '^{"hotpath_profiling_mode"' > /tmp/head_alloc.json
- name: Checkout base branch
run: |
git checkout ${{ github.event.pull_request.base.sha }}
- name: Run timing profiling on base
env:
HOTPATH_JSON: "true"
run: |
cargo run --features='hotpath' 2>&1 | grep '^{"hotpath_profiling_mode"' > /tmp/base_timing.json || echo '{}' > /tmp/base_timing.json
- name: Run allocation profiling on base
env:
HOTPATH_JSON: "true"
run: |
cargo run --features='hotpath,hotpath-alloc' 2>&1 | grep '^{"hotpath_profiling_mode"' > /tmp/base_alloc.json || echo '{}' > /tmp/base_alloc.json
- name: Save PR number
run: |
echo '${{ github.event.pull_request.number }}' > /tmp/pr_number.txt
- name: Upload profiling results
uses: actions/upload-artifact@v5
with:
name: hotpath-results
path: |
/tmp/head_timing.json
/tmp/head_alloc.json
/tmp/base_timing.json
/tmp/base_alloc.json
/tmp/pr_number.txt
retention-days: 1


@@ -10,45 +10,12 @@ env:
CARGO_TERM_COLOR: always
jobs:
test:
name: Test on ${{ matrix.target }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
target: x86_64-unknown-linux-gnu
- os: ubuntu-latest
target: aarch64-unknown-linux-gnu
build:
runs-on: ubuntu-latest
steps:
- name: "Checkout"
uses: actions/checkout@v6
- name: "Setup Rust toolchain"
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
target: ${{ matrix.target }}
- name: "Install cross-compilation tools"
if: matrix.target == 'aarch64-unknown-linux-gnu'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu
- name: "Configure linker for aarch64"
if: matrix.target == 'aarch64-unknown-linux-gnu'
run: |
mkdir -p .cargo
cat >> .cargo/config.toml << EOF
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
EOF
- name: "Build"
run: cargo build --verbose --target ${{ matrix.target }}
- name: "Run tests"
if: matrix.target == 'x86_64-unknown-linux-gnu'
run: cargo test --verbose --target ${{ matrix.target }}
uses: actions/checkout@v4
- name: "Build with Cargo"
run: cargo build --verbose

.gitignore (1 line changed)

@@ -1,3 +1,2 @@
/target
result*
/.direnv


@@ -1,26 +0,0 @@
condense_wildcard_suffixes = true
doc_comment_code_block_width = 80
edition = "2024" # Keep in sync with Cargo.toml.
enum_discrim_align_threshold = 60
force_explicit_abi = false
force_multiline_blocks = true
format_code_in_doc_comments = true
format_macro_matchers = true
format_strings = true
group_imports = "StdExternalCrate"
hex_literal_case = "Upper"
imports_granularity = "Crate"
imports_layout = "HorizontalVertical"
inline_attribute_width = 60
match_block_trailing_comma = true
max_width = 80
newline_style = "Unix"
normalize_comments = true
normalize_doc_attributes = true
overflow_delimited_expr = true
struct_field_align_threshold = 60
tab_spaces = 2
unstable_features = true
use_field_init_shorthand = true
use_try_shorthand = true
wrap_comments = true


@@ -1,13 +0,0 @@
[formatting]
align_entries = true
column_width = 100
compact_arrays = false
reorder_inline_tables = true
reorder_keys = true
[[rule]]
include = [ "**/Cargo.toml" ]
keys = [ "package" ]
[rule.formatting]
reorder_keys = false

Cargo.lock (generated, 1712 lines changed): diff suppressed because it is too large.


@@ -1,68 +1,24 @@
[package]
name = "microfetch"
version = "0.4.12"
edition = "2024"
[lib]
name = "microfetch_lib"
path = "src/lib.rs"
[[bin]]
name = "microfetch"
path = "src/main.rs"
version = "0.3.5"
edition = "2021"
[dependencies]
hotpath = { optional = true, version = "0.8.0" }
libc = "0.2.178"
[dev-dependencies]
criterion = "0.8.1"
[features]
hotpath = [ "dep:hotpath", "hotpath/hotpath" ]
hotpath-alloc = [ "hotpath/hotpath-alloc" ]
hotpath-off = [ "hotpath/hotpath-off" ]
[[bench]]
harness = false
name = "benchmark"
nix = { version = "0.29", features = ["fs", "hostname", "feature"] }
color-eyre = { version = "0.6", default-features = false }
[profile.dev]
opt-level = 1
opt-level = 3
[profile.release]
strip = true
opt-level = "z"
lto = true
codegen-units = 1
lto = true
opt-level = "s"
panic = "abort"
strip = true
panic = "abort"
[profile.profiler]
debug = true
inherits = "release"
inherits = "release"
debug = true
split-debuginfo = "unpacked"
strip = "none"
[lints.clippy]
complexity = { level = "warn", priority = -1 }
nursery = { level = "warn", priority = -1 }
pedantic = { level = "warn", priority = -1 }
perf = { level = "warn", priority = -1 }
style = { level = "warn", priority = -1 }
# The lint groups above enable some less-than-desirable rules, we should manually
# enable those to keep our sanity.
absolute_paths = "allow"
arbitrary_source_item_ordering = "allow"
implicit_return = "allow"
missing_docs_in_private_items = "allow"
non_ascii_literal = "allow"
pattern_type_mismatch = "allow"
print_stdout = "allow"
question_mark_used = "allow"
similar_names = "allow"
single_call_fn = "allow"
std_instead_of_core = "allow"
too_long_first_doc_paragraph = "allow"
too_many_lines = "allow"
unused_trait_names = "allow"
strip = "none"

README.md (260 lines changed)

@@ -1,30 +1,17 @@
<!-- markdownlint-disable MD013 MD033 MD041 -->
<div align="center">
<img src="https://deps.rs/repo/github/notashelf/microfetch/status.svg" alt="https://deps.rs/repo/github/notashelf/microfetch">
<img src="https://img.shields.io/github/stars/notashelf/microfetch?label=stars&color=DEA584" alt="stars">
<!-- <img src="https://img.shields.io/github/v/release/notashelf/microfetch?display_name=tag&color=DEA584"> -->
<img src="https://img.shields.io/github/stars/notashelf/microfetch?label=stars&color=DEA584">
</div>
<div id="doc-begin" align="center">
<h1 id="header">Microfetch</h1>
<p>Microscopic fetch tool in Rust, for NixOS systems, with special emphasis on speed</p>
<br/>
<a href="#synopsis">Synopsis</a><br/>
<a href="#features">Features</a> | <a href="#motivation">Motivation</a><br/> | <a href="#benchmarks">Benchmarks</a><br/>
<a href="#installation">Installation</a>
<br/>
</div>
<h1 align="center">Microfetch</h1>
## Synopsis
[fastfetch]: https://github.com/fastfetch-cli/fastfetch
Stupidly small and simple, laughably fast and pretty fetch tool. Written in Rust
for speed and ease of maintainability. Runs in a _fraction of a millisecond_ and
displays _most_ of the nonsense you'd see posted on r/unixporn or other internet
communities. Aims to replace [fastfetch] on my personal system, but
[probably not yours](#customizing). Though, you are more than welcome to use it
on your system: it is pretty _[fast](#benchmarks)_...
Stupidly simple, laughably fast fetch tool. Written in Rust for speed and ease
of maintainability. Runs in a _fraction of a millisecond_ and displays _most_ of
the nonsense you'd see posted on r/unixporn or other internet communities. Aims
to replace [fastfetch](https://github.com/fastfetch-cli/fastfetch) on my
personal system, but [probably not yours](#customizing). Though, you are more
than welcome to use it on your system: it's pretty [fast...](#benchmarks)
<p align="center">
<img
@@ -39,7 +26,6 @@ on your system: it is pretty _[fast](#benchmarks)_...
- Fast
- Really fast
- Minimal dependencies
- Tiny binary (~370kb [^1])
- Actually really fast
- Cool NixOS logo (other, inferior, distros are not supported)
- Reliable detection of following info:
@@ -48,187 +34,56 @@ on your system: it is pretty _[fast](#benchmarks)_...
- Name
- Version
- Architecture
- Current shell (from `$SHELL`, trimmed if store path)
- Current shell (from $SHELL, trimmed if store path)
- Current Desktop (DE/WM/Compositor and display backend)
- Memory Usage/Total Memory
- Storage Usage/Total Storage (for `/` only)
- Shell Colors
- Did I mention fast?
- Respects [`NO_COLOR` spec](https://no-color.org/)
[^1]: With the Mold linker, which is enabled by default in the Flake package,
the binary size is roughly 350kb. That's nearly 20kb reduction in size :)
## Motivation
[Rube Goldberg Machine]: https://en.wikipedia.org/wiki/Rube_Goldberg_machine
Fastfetch, as its name _probably_ already hinted, is a very fast fetch tool
written in C. I used to use Fastfetch on my systems, but I eventually came to
the realization that I am _not interested in any of its additional features_. I
don't use Sixel, I don't change my configuration more than maybe once a year and
I don't even display most of the fields that it does. Sure the configurability
is nice and I can configure the defaults that I do not like but how often do I
really do that?
Since I already enjoy programming challenges, and don't use a fetch program that
often, I eventually came to try and answer the question _how fast can I make my
fetch script?_ It is an _even faster_ fetch tool that I would've written in Bash
and put in my `~/.bashrc` but is _actually_ incredibly fast because it opts out
of all the customization options provided by tools such as Fastfetch. Since
Fetch scripts are kind of a coming-of-age ritual for most Linux users, I've
decided to use it on my system. You also might be interested if you like the
defaults and like speed.
Ultimately, it's a small, opinionated binary with a nice size that doesn't
bother me, and incredible speed. Customization? No thank you. I cannot
reiterate it enough: Microfetch is _annoyingly fast_. It does not, however,
solve a technical problem. The "problem" Microfetch solves is entirely
self-imposed. On the matter of _size_, the project is written in Rust, which
comes at the cost of "bloated" dependency trees and increased build times,
but we make an extended effort to keep the dependencies minimal and build times
manageable. The latter is also very easily mitigated with Nix's binary cache
systems. Since Microfetch is already in Nixpkgs, you are recommended to use it
to utilize the binary cache properly. The usage of Rust _is_ nice, however,
since it provides us with incredible tooling and a very powerful language that
allows for Microfetch to be as fast as possible. ~~Sure C could've been used
here as well, but do you think I hate myself?~~ Microfetch now features
handwritten assembly to unsafely optimize some areas. In hindsight you all
should have seen this coming. Is it faster? Yes.
Also see: [Rube Goldberg Machine]
## Benchmarks
Below are the benchmarks that I've used to back up my claims of Microfetch's
speed. It is fast, it is _very_ fast and that is the point of its existence. It
_could_ be faster, and it will be. Eventually.
At this point in time, the performance may sometimes be influenced by
hardware-specific race conditions or even your kernel configuration, which means
that Microfetch's speed may (at times) depend on your hardware setup. However,
even with the worst possible hardware I could find in my house, I've achieved a
nice less-than-1ms invocation time. Which is pretty good. While Microfetch
_could_ be made faster, we're in the territory of environmental bottlenecks
given how little Microfetch actually allocates.
Below are the actual benchmarks with Hyperfine measured on my Desktop system.
The benchmarks were performed under medium system load, and may not be the same
on your system. Please _also_ note that those benchmarks will not always be kept
up to date, but I will try to update the numbers as I make Microfetch faster.
| Command | Mean [µs] | Min [µs] | Max [µs] | Relative | Written by raf? |
| :----------- | ----------------: | -------: | -------: | -------------: | --------------: |
| `microfetch` | 604.4 ± 64.2 | 516.0 | 1184.6 | 1.00 | Yes |
| `fastfetch` | 140836.6 ± 1258.6 | 139204.7 | 143299.4 | 233.00 ± 24.82 | No |
| `pfetch` | 177036.6 ± 1614.3 | 174199.3 | 180830.2 | 292.89 ± 31.20 | No |
| `neofetch` | 406309.9 ± 1810.0 | 402757.3 | 409526.3 | 672.20 ± 71.40 | No |
| `nitch` | 127743.7 ± 1391.7 | 123933.5 | 130451.2 | 211.34 ± 22.55 | No |
| `macchina` | 13603.7 ± 339.7 | 12642.9 | 14701.4 | 22.51 ± 2.45 | No |
The point stands that Microfetch is significantly faster than every other fetch
tool I have tried. This is to be expected, of course, since Microfetch is
designed _explicitly_ for speed and makes some tradeoffs to achieve its
signature speed.
### Benchmarking Individual Functions
[Criterion.rs]: https://github.com/bheisler/criterion.rs
[Getting Started Guide]: https://bheisler.github.io/criterion.rs/book/getting_started.html
To benchmark individual functions, [Criterion.rs] is used. See Criterion's
[Getting Started Guide] for details or just run `cargo bench` to benchmark all
features of Microfetch.
### Profiling Allocations and Timing
[Hotpath]: https://github.com/pawurb/hotpath
Microfetch uses [Hotpath] for profiling function execution timing and heap
allocations. This helps identify performance bottlenecks and track optimization
progress. It is so effective that thanks to Hotpath, Microfetch has seen a 60%
reduction in the number of allocations.
To profile timing:
```bash
HOTPATH_JSON=true cargo run --features=hotpath
```
To profile allocations:
```bash
HOTPATH_JSON=true cargo run --features=hotpath,hotpath-alloc
```
The JSON output can be analyzed with the `hotpath` CLI tool for detailed
performance metrics. On pull requests, GitHub Actions automatically profiles
both timing and allocations, posting comparison comments to help catch
performance regressions.
## Installation
> [!NOTE]
> You will need a Nerdfonts patched font installed, and for your terminal
> emulator to support said font. Microfetch uses nerdfonts glyphs by default,
> but this can be changed by [patching the program](#customizing).
Microfetch is packaged in [nixpkgs](https://github.com/nixos/nixpkgs). You can
get it through the unstable channel for the time being. The Nix flake can also
be used for bleeding-edge builds.
Microfetch is packaged in [nixpkgs](https://github.com/nixos/nixpkgs). It can be
installed by adding `pkgs.microfetch` to your `environment.systemPackages`.
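
For reference, a minimal NixOS module fragment along those lines might look like
the following sketch (the file location and module structure are illustrative):

```nix
# Illustrative NixOS module fragment, e.g. in configuration.nix.
# Assumes `pkgs` is a nixpkgs instance that already ships microfetch.
{ pkgs, ... }: {
  environment.systemPackages = [ pkgs.microfetch ];
}
```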
Additionally, you can try out Microfetch in a Nix shell.
```bash
nix shell nixpkgs#microfetch
```
Or run it directly with `nix run`
```bash
nix run nixpkgs#microfetch
```
Non-Nix users will have to build Microfetch with `cargo`. It is not published
anywhere but I imagine you can use `cargo install --git` to install it from
source.
```bash
cargo install --git https://github.com/notashelf/microfetch.git
```
Non-Nix users will have to build Microfetch with `cargo`.
Microfetch is _currently_ not available anywhere else. Though, does it _really_
have to be?
## Benchmarks
Microfetch's performance is mostly hardware-dependent; however, the overall
trend seems to be < 2ms on any modern (2015 and after) CPU. Below are the
benchmarks with Hyperfine on my desktop system.
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative | Written by raf? |
| :----------- | ----------: | -------: | -------: | -------------: | --------------: |
| `microfetch` | 1.3 ± 0.0 | 1.3 | 1.4 | 1.00 | yes |
| `pfetch` | 254.2 ± 4.8 | 246.7 | 264.9 | 191.97 ± 7.10 | no |
| `neofetch` | 735.4 ± 9.5 | 721.1 | 752.8 | 555.48 ± 19.08 | no |
| `fastfetch` | 31.9 ± 0.8 | 30.8 | 33.8 | 24.08 ± 0.98 | no |
_As far as I'm concerned, Microfetch is faster than almost every fetch tool
there is. The only downside of using Rust is introducing more "bloated"
dependency trees and increasing build times. The latter is easily mitigated with
Nix's binary cache, though._
## Customizing
You can't.
You can't\*.
### Why?
Customization, of most kinds, is expensive: I could try reading environment
variables, parsing command-line arguments or reading a configuration file to
allow configuring various fields, but those inflate execution time and resource
consumption by a lot. Since Microfetch is closer to a code golf challenge than a
program that attempts to fill a gap, I have elected not to make this trade.
Customization, of any kind, is expensive: I could try reading environment
variables, parse command-line arguments or read a configuration file but all of
those increment execution time and resource consumption by a lot.
### Really?
[main module]: ./src/main.rs
[discussions tab]: https://github.com/NotAShelf/microfetch/discussions
To be fair, you _can_ customize Microfetch by, well, patching it. It is
certainly not the easiest way of doing so but if you are planning to change
something in Microfetch, patching is the best way to go. It will also be the only
way that does not compromise on speed, unless you patch in bad code. Various
users have adapted Microfetch to their distribution by patching the
[main module] and inserting the logo of their choice. This is also the best way
to go if you plan to make small changes. If your changes are not small, you
might want to look for a program that is designed to be customizable; Microfetch
is built for maximum performance.
The Nix package allows passing patches in a streamlined manner by passing
`.overrideAttrs` to the derivation. You can apply your patches in `patches` and
share your derivations with people. Feel free to use the [discussions tab] to
share your own variants of Microfetch!
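
As a rough sketch of what that can look like on NixOS (the patch file name here
is hypothetical, and this assumes the package comes from nixpkgs):

```nix
# Hedged sketch: override the nixpkgs derivation with a local patch.
# `./my-logo.patch` is a hypothetical file you would provide yourself.
{ pkgs, ... }: {
  environment.systemPackages = [
    (pkgs.microfetch.overrideAttrs (oldAttrs: {
      patches = (oldAttrs.patches or [ ]) ++ [ ./my-logo.patch ];
    }))
  ];
}
```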
To be fair, you _can_ customize Microfetch by... Well, patching it. It's not the
best way per se, but it will be the only way that does not compromise on speed.
## Contributing
@@ -237,45 +92,20 @@ them altogether, as you might have a really good idea worth discussing but as a
general rule of thumb consider talking to me before creating a feature PR.
Contributions that help improve performance in specific areas of Microfetch are
welcome. Though, prepare to be bombarded with questions if your changes are
large.
welcome. Though, prepare to be bombarded with questions.
### Hacking
## Hacking
A Nix flake is provided. You may use `nix develop` to get started. Direnv users
may instead run `direnv allow` to get a complete environment with shell
integration.
A Nix flake is provided. `nix develop` to get started. Direnv users may simply
run `direnv allow` to get started.
Non-Nix users will need `cargo`, `clang` and `mold` installed on their system to
build Microfetch. As Mold seems to yield _slightly_ better results than the
default linker, it has been set as the default in `.cargo/config.toml` for
x86-64 Linux. You may override those defaults using the `RUSTFLAGS` environment
variable. For example:
```sh
# Use ld instead of Mold
$ RUSTFLAGS="-C linker=/path/to/ld.lld" cargo build
```
Non-nix users will need `cargo` and `gcc` installed on their system, see
`Cargo.toml` for available release profiles.
## Thanks
Huge thanks to everyone who took the time to make pull requests or nag me in
person about current issues. To list a few, special thanks to:
- [@Nydragon](https://github.com/Nydragon) - For packaging Microfetch in Nixpkgs
- [@ErrorNoInternet](https://github.com/ErrorNoInternet) - Performance
improvements and code assistance
- [@SoraTenshi](https://github.com/SoraTenshi) - General tips and code
improvements
- [@bloxx12](https://github.com/bloxx12) - Performance improvements and
benchmarking plots
- [@sioodmy](https://github.com/sioodmy) - Being cute
- [@mewoocat](https://github.com/mewoocat) - The awesome NixOS logo ASCII used
in Microfetch
- [@uzaaft](https://github.com/uzaaft) - Helping me go faster
Additionally a big thank you to everyone who used, talked about or criticized
Microfetch. I might have missed your name here, but you have my thanks.
person about current issues.
## License


@@ -1,33 +0,0 @@
use criterion::{Criterion, criterion_group, criterion_main};
use microfetch_lib::{
UtsName,
colors::print_dots,
desktop::get_desktop_info,
release::{get_os_pretty_name, get_system_info},
system::{
get_memory_usage,
get_root_disk_usage,
get_shell,
get_username_and_hostname,
},
uptime::get_current,
};
fn main_benchmark(c: &mut Criterion) {
let utsname = UtsName::uname().expect("Failed to get uname");
c.bench_function("user_info", |b| {
b.iter(|| get_username_and_hostname(&utsname));
});
c.bench_function("os_name", |b| b.iter(get_os_pretty_name));
c.bench_function("kernel_version", |b| b.iter(|| get_system_info(&utsname)));
c.bench_function("shell", |b| b.iter(get_shell));
c.bench_function("desktop", |b| b.iter(get_desktop_info));
c.bench_function("uptime", |b| b.iter(get_current));
c.bench_function("memory_usage", |b| b.iter(get_memory_usage));
c.bench_function("storage", |b| b.iter(get_root_disk_usage));
c.bench_function("colors", |b| b.iter(print_dots));
}
criterion_group!(benches, main_benchmark);
criterion_main!(benches);

flake.lock (generated, 10 lines changed)

@@ -2,19 +2,15 @@
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1743359643,
"narHash": "sha256-RkyJ9a67s0zEIz4O66TyZOIGh4TFZ4dKHKMgnxZCh2I=",
"lastModified": 1763381801,
"narHash": "sha256-325fR0JmHW7B74/gHPv/S9w1Rfj/M2HniwQFUwdrZ9k=",
"lastModified": 1722719969,
"narHash": "sha256-E47qbT/mRtBCSZra+9S9208sp/QnNeOAq7EhHX+eMNE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ca77b4bc80e558ce59f2712fdb276f90c0ee309a",
"rev": "46931757ea8bdbba25c076697f8e73b8dc39fef5",
"rev": "83a364ced9d5b8a6bdac897bbef6b91e70777b97",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}


@@ -1,6 +1,6 @@
{
description = "A microscopic fetch script in Rust, for NixOS systems";
inputs.nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
outputs = {
self,
@@ -10,12 +10,8 @@
forEachSystem = nixpkgs.lib.genAttrs systems;
pkgsForEach = nixpkgs.legacyPackages;
in {
packages = forEachSystem (system: let
pkgs = pkgsForEach.${system};
in {
default = self.packages.${system}.microfetch;
microfetch = pkgs.callPackage ./nix/package.nix {};
microfetch-mold = pkgs.callPackage ./nix/package.nix {useMold = true;};
packages = forEachSystem (system: {
default = pkgsForEach.${system}.callPackage ./nix/package.nix {};
});
devShells = forEachSystem (system: {


@@ -1,53 +1,30 @@
{
lib,
stdenv,
stdenvAdapters,
rustPlatform,
stdenvAdapters,
llvm,
useMold ? stdenv.isLinux && !stdenv.hostPlatform.isAarch,
}: let
toml = (lib.importTOML ../Cargo.toml).package;
pname = toml.name;
inherit (toml) version;
# Select stdenv based on useMold flag
stdenv =
if useMold
then stdenvAdapters.useMoldLinker llvm.stdenv
else llvm.stdenv;
in
rustPlatform.buildRustPackage.override {inherit stdenv;} {
rustPlatform.buildRustPackage.override {stdenv = stdenvAdapters.useMoldLinker llvm.stdenv;} {
RUSTFLAGS = "-C link-arg=-fuse-ld=mold";
inherit pname version;
src = let
fs = lib.fileset;
s = ../.;
in
fs.toSource {
root = s;
fileset = fs.unions [
(fs.fileFilter (file: builtins.any file.hasExt ["rs"]) (s + /src))
(s + /Cargo.lock)
(s + /Cargo.toml)
(s + /benches)
];
};
cargoLock.lockFile = ../Cargo.lock;
enableParallelBuilding = true;
buildNoDefaultFeatures = true;
doCheck = false;
# Only set RUSTFLAGS for mold if useMold is enabled
env = lib.optionalAttrs useMold {
CARGO_LINKER = "clang";
RUSTFLAGS = "-C link-arg=-fuse-ld=mold";
src = builtins.path {
name = "${pname}-${version}";
path = lib.sources.cleanSource ../.;
};
cargoLock.lockFile = ../Cargo.lock;
meta = {
description = "Microscopic fetch script in Rust, for NixOS systems";
description = "A microscopic fetch script in Rust, for NixOS systems";
homepage = "https://github.com/NotAShelf/microfetch";
license = lib.licenses.gpl3Only;
maintainers = [lib.maintainers.NotAShelf];
maintainers = with lib.maintainers; [NotAShelf];
mainProgram = "microfetch";
};
}


@@ -1,31 +1,24 @@
{
mkShell,
cargo,
rustc,
mold,
clang,
rust-analyzer-unwrapped,
rustfmt,
clippy,
taplo,
cargo,
rustc,
gcc,
rustPlatform,
gnuplot,
}:
mkShell {
name = "microfetch";
strictDeps = true;
nativeBuildInputs = [
cargo
rustc
mold
clang
gcc
rust-analyzer-unwrapped
(rustfmt.override {asNightly = true;})
rustfmt
clippy
taplo
gnuplot # for Criterion.rs plots
];
env.RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";


@@ -1,81 +1,11 @@
use std::sync::LazyLock;
pub const RESET: &str = "\x1b[0m";
pub const BLUE: &str = "\x1b[34m";
pub const CYAN: &str = "\x1b[36m";
pub const GREEN: &str = "\x1b[32m";
pub const YELLOW: &str = "\x1b[33m";
pub const RED: &str = "\x1b[31m";
pub const MAGENTA: &str = "\x1b[35m";
pub struct Colors {
pub reset: &'static str,
pub blue: &'static str,
pub cyan: &'static str,
pub green: &'static str,
pub yellow: &'static str,
pub red: &'static str,
pub magenta: &'static str,
}
impl Colors {
const fn new(is_no_color: bool) -> Self {
if is_no_color {
Self {
reset: "",
blue: "",
cyan: "",
green: "",
yellow: "",
red: "",
magenta: "",
}
} else {
Self {
reset: "\x1b[0m",
blue: "\x1b[34m",
cyan: "\x1b[36m",
green: "\x1b[32m",
yellow: "\x1b[33m",
red: "\x1b[31m",
magenta: "\x1b[35m",
}
}
}
}
pub static COLORS: LazyLock<Colors> = LazyLock::new(|| {
const NO_COLOR: *const libc::c_char = c"NO_COLOR".as_ptr();
let is_no_color = unsafe { !libc::getenv(NO_COLOR).is_null() };
Colors::new(is_no_color)
});
#[must_use]
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn print_dots() -> String {
// Pre-calculate capacity: 6 color codes + " " (glyph + 2 spaces) per color
const GLYPH: &str = "";
let capacity = COLORS.blue.len()
+ COLORS.cyan.len()
+ COLORS.green.len()
+ COLORS.yellow.len()
+ COLORS.red.len()
+ COLORS.magenta.len()
+ COLORS.reset.len()
+ (GLYPH.len() + 2) * 6;
let mut result = String::with_capacity(capacity);
result.push_str(COLORS.blue);
result.push_str(GLYPH);
result.push_str(" ");
result.push_str(COLORS.cyan);
result.push_str(GLYPH);
result.push_str(" ");
result.push_str(COLORS.green);
result.push_str(GLYPH);
result.push_str(" ");
result.push_str(COLORS.yellow);
result.push_str(GLYPH);
result.push_str(" ");
result.push_str(COLORS.red);
result.push_str(GLYPH);
result.push_str(" ");
result.push_str(COLORS.magenta);
result.push_str(GLYPH);
result.push_str(" ");
result.push_str(COLORS.reset);
result
format!("{BLUE}{CYAN}{GREEN}{YELLOW}{RED}{MAGENTA}{RESET}")
}


@@ -1,42 +1,35 @@
use std::{ffi::CStr, fmt::Write};
#[must_use]
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_desktop_info() -> String {
// Retrieve the environment variables and handle Result types
let desktop_str = unsafe {
let ptr = libc::getenv(c"XDG_CURRENT_DESKTOP".as_ptr());
if ptr.is_null() {
"Unknown"
} else {
let s = CStr::from_ptr(ptr).to_str().unwrap_or("Unknown");
s.strip_prefix("none+").unwrap_or(s)
fn capitalize_first_letter(s: &str) -> String {
if s.is_empty() {
return String::new();
}
let mut chars = s.chars();
let first_char = chars.next().unwrap().to_uppercase().to_string();
let rest: String = chars.collect();
first_char + &rest
}
};
let backend_str = unsafe {
let ptr = libc::getenv(c"XDG_SESSION_TYPE".as_ptr());
if ptr.is_null() {
"Unknown"
// Retrieve the environment variables and handle Result types
let desktop_env = std::env::var("XDG_CURRENT_DESKTOP");
let display_backend_result = std::env::var("XDG_SESSION_TYPE");
// Capitalize the first letter of the display backend value
let display_backend = capitalize_first_letter(display_backend_result.as_deref().unwrap_or(""));
// Trim "none+" from the start of desktop_env if present
// Use "Unknown" if desktop_env is empty or has an error
let desktop_env = match desktop_env {
Err(_) => "Unknown".to_string(),
Ok(s) => s.trim_start_matches("none+").to_string(),
};
// Handle the case where display_backend might be empty after capitalization
let display_backend = if display_backend.is_empty() {
"Unknown".to_string()
} else {
let s = CStr::from_ptr(ptr).to_str().unwrap_or("Unknown");
if s.is_empty() { "Unknown" } else { s }
}
};
display_backend
};
// Pre-calculate capacity: desktop_len + " (" + backend_len + ")"
// Capitalize first char needs temporary allocation only if backend exists
let mut result =
String::with_capacity(desktop_str.len() + backend_str.len() + 3);
result.push_str(desktop_str);
result.push_str(" (");
// Capitalize first character of backend
if let Some(first_char) = backend_str.chars().next() {
let _ = write!(result, "{}", first_char.to_ascii_uppercase());
result.push_str(&backend_str[first_char.len_utf8()..]);
}
result.push(')');
result
format!("{desktop_env} ({display_backend})")
}


@@ -1,46 +0,0 @@
pub mod colors;
pub mod desktop;
pub mod release;
pub mod syscall;
pub mod system;
pub mod uptime;
use std::mem::MaybeUninit;
/// Wrapper for `libc::utsname` with safe accessor methods
pub struct UtsName(libc::utsname);
impl UtsName {
/// Calls `uname` syscall and returns a `UtsName` wrapper
///
/// # Errors
///
/// Returns an error if the `uname` syscall fails
pub fn uname() -> Result<Self, std::io::Error> {
let mut uts = MaybeUninit::uninit();
if unsafe { libc::uname(uts.as_mut_ptr()) } != 0 {
return Err(std::io::Error::last_os_error());
}
Ok(Self(unsafe { uts.assume_init() }))
}
#[must_use]
pub const fn nodename(&self) -> &std::ffi::CStr {
unsafe { std::ffi::CStr::from_ptr(self.0.nodename.as_ptr()) }
}
#[must_use]
pub const fn sysname(&self) -> &std::ffi::CStr {
unsafe { std::ffi::CStr::from_ptr(self.0.sysname.as_ptr()) }
}
#[must_use]
pub const fn release(&self) -> &std::ffi::CStr {
unsafe { std::ffi::CStr::from_ptr(self.0.release.as_ptr()) }
}
#[must_use]
pub const fn machine(&self) -> &std::ffi::CStr {
unsafe { std::ffi::CStr::from_ptr(self.0.machine.as_ptr()) }
}
}


@@ -1,112 +1,75 @@
mod colors;
mod desktop;
mod release;
mod syscall;
mod system;
mod uptime;
use std::io::{self, Cursor, Write};
use crate::colors::{print_dots, BLUE, CYAN, RESET};
use crate::desktop::get_desktop_info;
use crate::release::{get_os_pretty_name, get_system_info};
use crate::system::{get_memory_usage, get_root_disk_usage, get_shell, get_username_and_hostname};
use crate::uptime::get_current;
pub use microfetch_lib::UtsName;
use color_eyre::Report;
use crate::{
colors::print_dots,
desktop::get_desktop_info,
release::{get_os_pretty_name, get_system_info},
system::{
get_memory_usage,
get_root_disk_usage,
get_shell,
get_username_and_hostname,
},
uptime::get_current,
};
fn main() -> Result<(), Report> {
color_eyre::install()?;
#[cfg_attr(feature = "hotpath", hotpath::main)]
fn main() -> Result<(), Box<dyn std::error::Error>> {
if Some("--version") == std::env::args().nth(1).as_deref() {
println!("Microfetch {}", env!("CARGO_PKG_VERSION"));
} else {
let utsname = UtsName::uname()?;
let fields = Fields {
user_info: get_username_and_hostname(&utsname),
os_name: get_os_pretty_name()?,
kernel_version: get_system_info(&utsname),
shell: get_shell(),
desktop: get_desktop_info(),
uptime: get_current()?,
memory_usage: get_memory_usage()?,
storage: get_root_disk_usage()?,
colors: print_dots(),
user_info: get_username_and_hostname(),
os_name: get_os_pretty_name()?,
kernel_version: get_system_info()?,
shell: get_shell(),
desktop: get_desktop_info(),
uptime: get_current()?,
memory_usage: get_memory_usage()?,
storage: get_root_disk_usage()?,
colors: print_dots(),
};
print_system_info(&fields)?;
}
Ok(())
print_system_info(&fields);
Ok(())
}
// Struct to hold all the fields we need in order to print the fetch. This
// helps avoid Clippy warnings about argument count, and makes it slightly
// easier to pass data around. Though, it is not like we really need to.
// Struct to hold all the fields we need to print
// helps avoid clippy warnings about argument count
// and makes it easier to pass around, though its
// not like we need to
struct Fields {
user_info: String,
os_name: String,
kernel_version: String,
shell: String,
uptime: String,
desktop: String,
memory_usage: String,
storage: String,
colors: String,
user_info: String,
os_name: String,
kernel_version: String,
shell: String,
uptime: String,
desktop: String,
memory_usage: String,
storage: String,
colors: String,
}
#[cfg_attr(feature = "hotpath", hotpath::measure)]
fn print_system_info(
fields: &Fields,
) -> Result<(), Box<dyn std::error::Error>> {
use crate::colors::COLORS;
fn print_system_info(fields: &Fields) {
let Fields {
user_info,
os_name,
kernel_version,
shell,
uptime,
desktop,
memory_usage,
storage,
colors,
} = fields;
let Fields {
user_info,
os_name,
kernel_version,
shell,
uptime,
desktop,
memory_usage,
storage,
colors,
} = fields;
let cyan = COLORS.cyan;
let blue = COLORS.blue;
let reset = COLORS.reset;
let mut buf = [0u8; 2048];
let mut cursor = Cursor::new(&mut buf[..]);
write!(
cursor,
"
{cyan} {blue} {user_info} ~{reset}
{cyan} {blue} {cyan} {cyan} {blue}System{reset} {os_name}
{cyan} {blue} {cyan} {cyan} {blue}Kernel{reset} {kernel_version}
{blue} {blue}{cyan} {cyan} {blue}Shell{reset} {shell}
{blue} {cyan} {cyan} {blue}Uptime{reset} {uptime}
{blue} {cyan} {cyan} {cyan} {blue}Desktop{reset} {desktop}
{blue} {cyan}{blue} {cyan}󰍛 {blue}Memory{reset} {memory_usage}
{blue} {cyan}{blue} {cyan}󱥎 {blue}Storage (/){reset} {storage}
{cyan} {blue} {cyan} {blue}Colors{reset} {colors}\n\n"
)?;
let len = cursor.position() as usize;
// Direct syscall to avoid stdout buffering allocation
let written = unsafe { libc::write(libc::STDOUT_FILENO, buf.as_ptr().cast(), len) };
if written < 0 {
return Err(io::Error::last_os_error().into());
}
if written as usize != len {
return Err(io::Error::new(io::ErrorKind::WriteZero, "partial write to stdout").into());
}
Ok(())
println!(
"
{CYAN} {BLUE} {user_info} ~{RESET}
{CYAN} {BLUE} {CYAN} {CYAN} {BLUE}System{RESET} {os_name}
{CYAN} {BLUE} {CYAN} {CYAN} {BLUE}Kernel{RESET} {kernel_version}
{BLUE} {BLUE}{CYAN} {CYAN} {BLUE}Shell{RESET} {shell}
{BLUE} {CYAN} {CYAN} {BLUE}Uptime{RESET} {uptime}
{BLUE} {CYAN} {CYAN} {CYAN} {BLUE}Desktop{RESET} {desktop}
{BLUE} {CYAN}{BLUE} {CYAN}󰍛 {BLUE}Memory{RESET} {memory_usage}
{BLUE} {CYAN}{BLUE} {CYAN}󱥎 {BLUE}Storage (/){RESET} {storage}
{CYAN} {BLUE} {CYAN} {BLUE}Colors{RESET} {colors}");
}


@@ -1,69 +1,27 @@
use std::{fmt::Write as _, io};
use color_eyre::Result;
use std::fs::read_to_string;
use std::io;
use crate::{UtsName, syscall::read_file_fast};
#[must_use]
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_system_info(utsname: &UtsName) -> String {
let sysname = utsname.sysname().to_str().unwrap_or("Unknown");
let release = utsname.release().to_str().unwrap_or("Unknown");
let machine = utsname.machine().to_str().unwrap_or("Unknown");
// Pre-allocate capacity: sysname + " " + release + " (" + machine + ")"
let capacity = sysname.len() + 1 + release.len() + 2 + machine.len() + 1;
let mut result = String::with_capacity(capacity);
write!(result, "{sysname} {release} ({machine})").unwrap();
result
pub fn get_system_info() -> nix::Result<String> {
let utsname = nix::sys::utsname::uname()?;
Ok(format!(
"{} {} ({})",
utsname.sysname().to_str().unwrap_or("Unknown"),
utsname.release().to_str().unwrap_or("Unknown"),
utsname.machine().to_str().unwrap_or("Unknown")
))
}
/// Gets the pretty name of the OS from `/etc/os-release`.
///
/// # Errors
///
/// Returns an error if `/etc/os-release` cannot be read.
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_os_pretty_name() -> Result<String, io::Error> {
// Fast byte-level scanning for PRETTY_NAME=
const PREFIX: &[u8] = b"PRETTY_NAME=";
let os_release_content = read_to_string("/etc/os-release")?;
let pretty_name = os_release_content
.lines()
.find(|line| line.starts_with("PRETTY_NAME="))
.map(|line| {
line.trim_start_matches("PRETTY_NAME=")
.trim_matches('"')
.to_string()
});
let mut buffer = [0u8; 1024];
// Use fast syscall-based file reading
let bytes_read = read_file_fast("/etc/os-release", &mut buffer)?;
let content = &buffer[..bytes_read];
let mut offset = 0;
while offset < content.len() {
let remaining = &content[offset..];
// Find newline or end
let line_end = remaining
.iter()
.position(|&b| b == b'\n')
.unwrap_or(remaining.len());
let line = &remaining[..line_end];
if line.starts_with(PREFIX) {
let value = &line[PREFIX.len()..];
// Strip quotes if present
let trimmed = if value.len() >= 2
&& value[0] == b'"'
&& value[value.len() - 1] == b'"'
{
&value[1..value.len() - 1]
} else {
value
};
// Convert to String - should be valid UTF-8
return Ok(String::from_utf8_lossy(trimmed).into_owned());
}
offset += line_end + 1;
}
Ok("Unknown".to_owned())
Ok(pretty_name.unwrap_or("Unknown".to_string()))
}


@@ -1,210 +0,0 @@
//! Incredibly fast syscall wrappers for using inline assembly. Serves the
//! purposes of completely bypassing Rust's standard library in favor of
//! handwritten Assembly. Is this a good idea? No. Is it fast? Yeah, but only
//! marginally. Either way it serves a purpose and I will NOT accept criticism.
//! What do you mean I wasted two whole hours to make the program only 100µs
//! faster?
//!
//! Supports `x86_64` and `aarch64` architectures. Riscv support will be
//! implemented when and ONLY WHEN I can be bothered to work on it.
use std::io;
/// Direct syscall to open a file
///
/// # Returns
///
/// File descriptor or -1 on error
///
/// # Safety
///
/// The caller must ensure:
///
/// - `path` points to a valid null-terminated C string
/// - The pointer remains valid for the duration of the syscall
#[inline]
#[must_use]
pub unsafe fn sys_open(path: *const u8, flags: i32) -> i32 {
#[cfg(target_arch = "x86_64")]
unsafe {
let fd: i64;
std::arch::asm!(
"syscall",
in("rax") 2i64, // SYS_open
in("rdi") path,
in("rsi") flags,
in("rdx") 0i32, // mode (not used for reading)
lateout("rax") fd,
lateout("rcx") _,
lateout("r11") _,
options(nostack)
);
#[allow(clippy::cast_possible_truncation)]
{
fd as i32
}
}
#[cfg(target_arch = "aarch64")]
unsafe {
let fd: i64;
std::arch::asm!(
"svc #0",
in("x8") 56i64, // SYS_openat
in("x0") -100i32, // AT_FDCWD
in("x1") path,
in("x2") flags,
in("x3") 0i32, // mode
lateout("x0") fd,
options(nostack)
);
#[allow(clippy::cast_possible_truncation)]
{
fd as i32
}
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
compile_error!("Unsupported architecture for inline assembly syscalls");
}
}
/// Direct syscall to read from a file descriptor
///
/// # Returns
///
/// Number of bytes read or -1 on error
///
/// # Safety
///
/// The caller must ensure:
/// - `buf` points to a valid writable buffer of at least `count` bytes
/// - `fd` is a valid open file descriptor
#[inline]
pub unsafe fn sys_read(fd: i32, buf: *mut u8, count: usize) -> isize {
#[cfg(target_arch = "x86_64")]
unsafe {
let ret: i64;
std::arch::asm!(
"syscall",
in("rax") 0i64, // SYS_read
in("rdi") fd,
in("rsi") buf,
in("rdx") count,
lateout("rax") ret,
lateout("rcx") _,
lateout("r11") _,
options(nostack)
);
#[allow(clippy::cast_possible_truncation)]
{
ret as isize
}
}
#[cfg(target_arch = "aarch64")]
unsafe {
let ret: i64;
std::arch::asm!(
"svc #0",
in("x8") 63i64, // SYS_read
in("x0") fd,
in("x1") buf,
in("x2") count,
lateout("x0") ret,
options(nostack)
);
#[allow(clippy::cast_possible_truncation)]
{
ret as isize
}
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
compile_error!("Unsupported architecture for inline assembly syscalls");
}
}
/// Direct syscall to close a file descriptor
///
/// # Safety
///
/// The caller must ensure `fd` is a valid open file descriptor
#[inline]
#[must_use]
pub unsafe fn sys_close(fd: i32) -> i32 {
#[cfg(target_arch = "x86_64")]
unsafe {
let ret: i64;
std::arch::asm!(
"syscall",
in("rax") 3i64, // SYS_close
in("rdi") fd,
lateout("rax") ret,
lateout("rcx") _,
lateout("r11") _,
options(nostack)
);
#[allow(clippy::cast_possible_truncation)]
{
ret as i32
}
}
#[cfg(target_arch = "aarch64")]
unsafe {
let ret: i64;
std::arch::asm!(
"svc #0",
in("x8") 57i64, // SYS_close
in("x0") fd,
lateout("x0") ret,
options(nostack)
);
#[allow(clippy::cast_possible_truncation)]
{
ret as i32
}
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
compile_error!("Unsupported architecture for inline assembly syscalls");
}
}
/// Read entire file using direct syscalls. This avoids libc overhead and can be
/// significantly faster for small files.
///
/// # Errors
///
/// Returns an error if the file cannot be opened or read
#[inline]
pub fn read_file_fast(path: &str, buffer: &mut [u8]) -> io::Result<usize> {
const O_RDONLY: i32 = 0;
// Use stack-allocated buffer for null-terminated path (max 256 bytes)
let path_bytes = path.as_bytes();
if path_bytes.len() >= 256 {
return Err(io::Error::new(io::ErrorKind::InvalidInput, "Path too long"));
}
let mut path_buf = [0u8; 256];
path_buf[..path_bytes.len()].copy_from_slice(path_bytes);
// XXX: Already zero-terminated since array is initialized to zeros
unsafe {
let fd = sys_open(path_buf.as_ptr(), O_RDONLY);
if fd < 0 {
return Err(io::Error::last_os_error());
}
let bytes_read = sys_read(fd, buffer.as_mut_ptr(), buffer.len());
let _ = sys_close(fd);
if bytes_read < 0 {
return Err(io::Error::last_os_error());
}
#[allow(clippy::cast_sign_loss)]
{
Ok(bytes_read as usize)
}
}
}


@@ -1,184 +1,71 @@
use std::{ffi::CStr, fmt::Write as _, io, mem::MaybeUninit};
use color_eyre::Result;
use nix::sys::statvfs::statvfs;
use crate::{UtsName, colors::COLORS, syscall::read_file_fast};
use std::env;
use std::io::{self};
#[must_use]
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_username_and_hostname(utsname: &UtsName) -> String {
let username = unsafe {
let ptr = libc::getenv(c"USER".as_ptr());
if ptr.is_null() {
"unknown_user"
} else {
CStr::from_ptr(ptr).to_str().unwrap_or("unknown_user")
}
};
let hostname = utsname.nodename().to_str().unwrap_or("unknown_host");
use crate::colors::{CYAN, GREEN, RED, RESET, YELLOW};
let capacity = COLORS.yellow.len()
+ username.len()
+ COLORS.red.len()
+ 1
+ COLORS.green.len()
+ hostname.len()
+ COLORS.reset.len();
let mut result = String::with_capacity(capacity);
pub fn get_username_and_hostname() -> String {
let username = env::var("USER").unwrap_or("unknown_user".to_string());
let hostname = nix::unistd::gethostname().unwrap_or("unknown_host".to_string().into());
let hostname = hostname.to_string_lossy();
result.push_str(COLORS.yellow);
result.push_str(username);
result.push_str(COLORS.red);
result.push('@');
result.push_str(COLORS.green);
result.push_str(hostname);
result.push_str(COLORS.reset);
result
format!("{YELLOW}{username}{RED}@{GREEN}{hostname}")
}
#[must_use]
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_shell() -> String {
unsafe {
let ptr = libc::getenv(c"SHELL".as_ptr());
if ptr.is_null() {
return "unknown_shell".into();
}
let shell_path = env::var("SHELL").unwrap_or("unknown_shell".to_string());
let shell_name = shell_path.rsplit('/').next().unwrap_or("unknown_shell");
let bytes = CStr::from_ptr(ptr).to_bytes();
let start = bytes.iter().rposition(|&b| b == b'/').map_or(0, |i| i + 1);
let name = std::str::from_utf8_unchecked(&bytes[start..]);
name.into()
}
shell_name.to_string()
}
/// Gets the root disk usage information.
///
/// # Errors
///
/// Returns an error if the filesystem information cannot be retrieved.
#[cfg_attr(feature = "hotpath", hotpath::measure)]
#[allow(clippy::cast_precision_loss)]
pub fn get_root_disk_usage() -> Result<String, io::Error> {
let mut vfs = MaybeUninit::uninit();
let path = b"/\0";
let vfs = statvfs("/")?;
let block_size = vfs.block_size() as u64;
let total_blocks = vfs.blocks();
let available_blocks = vfs.blocks_available();
if unsafe { libc::statvfs(path.as_ptr().cast(), vfs.as_mut_ptr()) } != 0 {
return Err(io::Error::last_os_error());
}
let total_size = block_size * total_blocks;
let used_size = total_size - (block_size * available_blocks);
let vfs = unsafe { vfs.assume_init() };
let block_size = vfs.f_bsize;
let total_blocks = vfs.f_blocks;
let available_blocks = vfs.f_bavail;
let total_size = total_size as f64 / (1024.0 * 1024.0 * 1024.0);
let used_size = used_size as f64 / (1024.0 * 1024.0 * 1024.0);
let usage = (used_size as f64 / total_size as f64) * 100.0;
let total_size = block_size * total_blocks;
let used_size = total_size - (block_size * available_blocks);
let total_size = total_size as f64 / (1024.0 * 1024.0 * 1024.0);
let used_size = used_size as f64 / (1024.0 * 1024.0 * 1024.0);
let usage = (used_size / total_size) * 100.0;
let mut result = String::with_capacity(64);
write!(
result,
"{used_size:.2} GiB / {total_size:.2} GiB ({cyan}{usage:.0}%{reset})",
cyan = COLORS.cyan,
reset = COLORS.reset,
)
.unwrap();
Ok(result)
Ok(format!(
"{used_size:.2} GiB / {total_size:.2} GiB ({CYAN}{usage:.0}%{RESET})"
))
}
/// Fast integer parsing without stdlib overhead
#[inline]
fn parse_u64_fast(s: &[u8]) -> u64 {
let mut result = 0u64;
for &byte in s {
if byte.is_ascii_digit() {
result = result * 10 + u64::from(byte - b'0');
} else {
break;
}
}
result
}
/// Gets the system memory usage information.
///
/// # Errors
///
/// Returns an error if `/proc/meminfo` cannot be read.
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_memory_usage() -> Result<String, io::Error> {
#[cfg_attr(feature = "hotpath", hotpath::measure)]
fn parse_memory_info() -> Result<(f64, f64), io::Error> {
let mut total_memory_kb = 0u64;
let mut available_memory_kb = 0u64;
let mut buffer = [0u8; 1024];
#[inline(always)]
fn parse_memory_info() -> Result<(f64, f64), io::Error> {
let mut total_memory_kb = 0.0;
let mut available_memory_kb = 0.0;
// Use fast syscall-based file reading
let bytes_read = read_file_fast("/proc/meminfo", &mut buffer)?;
let meminfo = &buffer[..bytes_read];
// Fast scanning for MemTotal and MemAvailable
let mut offset = 0;
let mut found_total = false;
let mut found_available = false;
while offset < meminfo.len() && (!found_total || !found_available) {
let remaining = &meminfo[offset..];
// Find newline or end
let line_end = remaining
.iter()
.position(|&b| b == b'\n')
.unwrap_or(remaining.len());
let line = &remaining[..line_end];
if line.starts_with(b"MemTotal:") {
// Skip "MemTotal:" and whitespace
let mut pos = 9;
while pos < line.len() && line[pos].is_ascii_whitespace() {
pos += 1;
for line in std::fs::read_to_string("/proc/meminfo")?.lines() {
let mut split = line.split_whitespace();
let key = split.next().unwrap_or("");
if key == "MemTotal:" {
total_memory_kb = split.next().unwrap_or("0").parse().unwrap_or(0.0);
} else if key == "MemAvailable:" {
available_memory_kb = split.next().unwrap_or("0").parse().unwrap_or(0.0);
}
}
total_memory_kb = parse_u64_fast(&line[pos..]);
found_total = true;
} else if line.starts_with(b"MemAvailable:") {
// Skip "MemAvailable:" and whitespace
let mut pos = 13;
while pos < line.len() && line[pos].is_ascii_whitespace() {
pos += 1;
}
available_memory_kb = parse_u64_fast(&line[pos..]);
found_available = true;
}
offset += line_end + 1;
let total_memory_gb = total_memory_kb / 1024.0 / 1024.0;
let available_memory_gb = available_memory_kb / 1024.0 / 1024.0;
let used_memory_gb = total_memory_gb - available_memory_gb;
Ok((used_memory_gb, total_memory_gb))
}
#[allow(clippy::cast_precision_loss)]
let total_memory_gb = total_memory_kb as f64 / 1024.0 / 1024.0;
#[allow(clippy::cast_precision_loss)]
let available_memory_gb = available_memory_kb as f64 / 1024.0 / 1024.0;
let used_memory_gb = total_memory_gb - available_memory_gb;
let (used_memory, total_memory) = parse_memory_info()?;
let percentage_used = (used_memory / total_memory * 100.0).round() as u64;
Ok((used_memory_gb, total_memory_gb))
}
let (used_memory, total_memory) = parse_memory_info()?;
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
let percentage_used = (used_memory / total_memory * 100.0).round() as u64;
let mut result = String::with_capacity(64);
write!(
result,
"{used_memory:.2} GiB / {total_memory:.2} GiB \
({cyan}{percentage_used}%{reset})",
cyan = COLORS.cyan,
reset = COLORS.reset,
)
.unwrap();
Ok(result)
Ok(format!(
"{used_memory:.2} GiB / {total_memory:.2} GiB ({CYAN}{percentage_used}%{RESET})"
))
}


@@ -1,114 +1,26 @@
use std::{io, mem::MaybeUninit};
use color_eyre::Result;
use nix::sys::sysinfo::sysinfo;
use std::io;
/// Faster integer to string conversion without the formatting overhead.
#[inline]
fn itoa(mut n: u64, buf: &mut [u8]) -> &str {
if n == 0 {
return "0";
}
let mut i = buf.len();
while n > 0 {
i -= 1;
buf[i] = b'0' + (n % 10) as u8;
n /= 10;
}
unsafe { std::str::from_utf8_unchecked(&buf[i..]) }
}
/// Direct `sysinfo` syscall using inline assembly
///
/// # Safety
///
/// This function uses inline assembly to make a direct syscall.
/// The caller must ensure the sysinfo pointer is valid.
#[inline]
unsafe fn sys_sysinfo(info: *mut libc::sysinfo) -> i64 {
#[cfg(target_arch = "x86_64")]
{
let ret: i64;
unsafe {
std::arch::asm!(
"syscall",
in("rax") 99_i64, // __NR_sysinfo
in("rdi") info,
out("rcx") _,
out("r11") _,
lateout("rax") ret,
options(nostack)
);
}
ret
}
#[cfg(target_arch = "aarch64")]
{
let ret: i64;
unsafe {
std::arch::asm!(
"svc #0",
in("x8") 179_i64, // __NR_sysinfo
in("x0") info,
lateout("x0") ret,
options(nostack)
);
}
ret
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
unsafe { libc::sysinfo(info) as i64 }
}
}
/// Gets the current system uptime.
///
/// # Errors
///
/// Returns an error if the system uptime cannot be retrieved.
#[cfg_attr(feature = "hotpath", hotpath::measure)]
pub fn get_current() -> Result<String, io::Error> {
let uptime_seconds = {
let mut info = MaybeUninit::uninit();
if unsafe { sys_sysinfo(info.as_mut_ptr()) } != 0 {
return Err(io::Error::last_os_error());
}
#[allow(clippy::cast_sign_loss)]
unsafe {
info.assume_init().uptime as u64
}
};
let info = sysinfo().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let uptime_seconds = info.uptime().as_secs_f64();
let days = uptime_seconds / 86400;
let hours = (uptime_seconds / 3600) % 24;
let minutes = (uptime_seconds / 60) % 60;
let total_minutes = (uptime_seconds / 60.0).round() as u64;
let days = total_minutes / (60 * 24);
let hours = (total_minutes % (60 * 24)) / 60;
let minutes = total_minutes % 60;
let mut result = String::with_capacity(32);
let mut buf = [0u8; 20]; // Enough for u64::MAX
if days > 0 {
result.push_str(itoa(days, &mut buf));
result.push_str(if days == 1 { " day" } else { " days" });
}
if hours > 0 {
if !result.is_empty() {
result.push_str(", ");
let mut parts = Vec::with_capacity(3);
if days > 0 {
parts.push(format!("{days} days"));
}
result.push_str(itoa(hours, &mut buf));
result.push_str(if hours == 1 { " hour" } else { " hours" });
}
if minutes > 0 {
if !result.is_empty() {
result.push_str(", ");
if hours > 0 || days > 0 {
parts.push(format!("{hours} hours"));
}
if minutes > 0 || hours > 0 || days > 0 {
parts.push(format!("{minutes} minutes"));
}
result.push_str(itoa(minutes, &mut buf));
result.push_str(if minutes == 1 { " minute" } else { " minutes" });
}
if result.is_empty() {
result.push_str("less than a minute");
}
Ok(result)
Ok(parts.join(", "))
}