eris: apply clippy lints

commit 17a3f15131
Author: raf
Date:   2025-05-01 05:04:30 +03:00
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
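
The hunks below appear to be mechanical applications of Clippy suggestions: inlining format arguments (clippy::uninlined_format_args), collapsing `.map(..).unwrap_or_else(..)` into a single `map_or_else` call (clippy::map_unwrap_or), and matching `Ok(())` instead of `Ok(_)` on unit results. A minimal sketch of the first two patterns, with illustrative values that are not taken from this codebase:

use std::env;
use std::path::PathBuf;

fn main() {
    let data_dir = "/var/lib/eris"; // illustrative value, not from the commit

    // clippy::uninlined_format_args: name the captured variable inside the braces.
    // Before: format!("{}/corpora", data_dir)
    let corpora_dir = format!("{data_dir}/corpora");

    // clippy::map_unwrap_or: fold `.map(..).unwrap_or_else(..)` into one `map_or_else` call.
    // Before: env::var_os("HOME").map(PathBuf::from).unwrap_or_else(|| PathBuf::from("."))
    let home = env::var_os("HOME").map_or_else(|| PathBuf::from("."), PathBuf::from);

    println!("{corpora_dir} {}", home.display());
}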


@@ -121,27 +121,21 @@ fn get_xdg_dirs() -> (PathBuf, PathBuf, PathBuf) {
     let config_home = env::var_os("XDG_CONFIG_HOME")
         .map(PathBuf::from)
         .unwrap_or_else(|| {
-            let home = env::var_os("HOME")
-                .map(PathBuf::from)
-                .unwrap_or_else(|| PathBuf::from("."));
+            let home = env::var_os("HOME").map_or_else(|| PathBuf::from("."), PathBuf::from);
             home.join(".config")
         });
     let data_home = env::var_os("XDG_DATA_HOME")
         .map(PathBuf::from)
         .unwrap_or_else(|| {
-            let home = env::var_os("HOME")
-                .map(PathBuf::from)
-                .unwrap_or_else(|| PathBuf::from("."));
+            let home = env::var_os("HOME").map_or_else(|| PathBuf::from("."), PathBuf::from);
             home.join(".local").join("share")
         });
     let cache_home = env::var_os("XDG_CACHE_HOME")
         .map(PathBuf::from)
         .unwrap_or_else(|| {
-            let home = env::var_os("HOME")
-                .map(PathBuf::from)
-                .unwrap_or_else(|| PathBuf::from("."));
+            let home = env::var_os("HOME").map_or_else(|| PathBuf::from("."), PathBuf::from);
             home.join(".cache")
         });
@@ -179,8 +173,8 @@ impl Config {
             max_delay: args.max_delay,
             max_tarpit_time: args.max_tarpit_time,
             block_threshold: args.block_threshold,
-            markov_corpora_dir: format!("{}/corpora", data_dir),
-            lua_scripts_dir: format!("{}/scripts", data_dir),
+            markov_corpora_dir: format!("{data_dir}/corpora"),
+            lua_scripts_dir: format!("{data_dir}/scripts"),
             data_dir,
             config_dir,
             cache_dir,
@@ -217,7 +211,7 @@ impl Config {
         for dir in dirs {
             fs::create_dir_all(dir)?;
-            log::debug!("Created directory: {}", dir);
+            log::debug!("Created directory: {dir}");
         }
         Ok(())
@@ -265,7 +259,7 @@ impl BotState {
     // Load previous state from disk
     fn load_from_disk(data_dir: &str, cache_dir: &str) -> Self {
         let mut state = Self::new(data_dir, cache_dir);
-        let blocked_ips_file = format!("{}/blocked_ips.txt", data_dir);
+        let blocked_ips_file = format!("{data_dir}/blocked_ips.txt");
         if let Ok(content) = fs::read_to_string(&blocked_ips_file) {
             let mut loaded = 0;
@@ -275,13 +269,13 @@ impl BotState {
                     loaded += 1;
                 }
             }
-            log::info!("Loaded {} blocked IPs from {}", loaded, blocked_ips_file);
+            log::info!("Loaded {loaded} blocked IPs from {blocked_ips_file}");
         } else {
-            log::info!("No blocked IPs file found at {}", blocked_ips_file);
+            log::info!("No blocked IPs file found at {blocked_ips_file}");
         }
         // Check for temporary hit counter cache
-        let hit_cache_file = format!("{}/hit_counters.json", cache_dir);
+        let hit_cache_file = format!("{cache_dir}/hit_counters.json");
         if let Ok(content) = fs::read_to_string(&hit_cache_file) {
             if let Ok(hit_map) = serde_json::from_str::<HashMap<String, u32>>(&content) {
                 for (ip_str, count) in hit_map {
@@ -301,7 +295,7 @@ impl BotState {
     fn save_to_disk(&self) {
         // Save blocked IPs
         if let Err(e) = fs::create_dir_all(&self.data_dir) {
-            log::error!("Failed to create data directory: {}", e);
+            log::error!("Failed to create data directory: {e}");
             return;
         }
@@ -315,16 +309,16 @@ impl BotState {
                         count += 1;
                     }
                 }
-                log::info!("Saved {} blocked IPs to {}", count, blocked_ips_file);
+                log::info!("Saved {count} blocked IPs to {blocked_ips_file}");
             }
             Err(e) => {
-                log::error!("Failed to create blocked IPs file: {}", e);
+                log::error!("Failed to create blocked IPs file: {e}");
             }
         }
         // Save hit counters to cache
         if let Err(e) = fs::create_dir_all(&self.cache_dir) {
-            log::error!("Failed to create cache directory: {}", e);
+            log::error!("Failed to create cache directory: {e}");
             return;
         }
@@ -337,13 +331,13 @@ impl BotState {
         match fs::File::create(&hit_cache_file) {
             Ok(file) => {
                 if let Err(e) = serde_json::to_writer(file, &hit_map) {
-                    log::error!("Failed to write hit counters to cache: {}", e);
+                    log::error!("Failed to write hit counters to cache: {e}");
                 } else {
                     log::debug!("Saved hit counters for {} IPs to cache", hit_map.len());
                 }
             }
             Err(e) => {
-                log::error!("Failed to create hit counter cache file: {}", e);
+                log::error!("Failed to create hit counter cache file: {e}");
             }
         }
     }
@@ -363,7 +357,7 @@ impl ScriptManager {
         // Try to load scripts from directory
         let script_dir = Path::new(scripts_dir);
         if script_dir.exists() {
-            log::debug!("Loading Lua scripts from directory: {}", scripts_dir);
+            log::debug!("Loading Lua scripts from directory: {scripts_dir}");
             if let Ok(entries) = fs::read_dir(script_dir) {
                 for entry in entries {
                     if let Ok(entry) = entry {
@@ -382,7 +376,7 @@ impl ScriptManager {
                 }
             }
         } else {
-            log::warn!("Lua scripts directory does not exist: {}", scripts_dir);
+            log::warn!("Lua scripts directory does not exist: {scripts_dir}");
         }
         // If no scripts were loaded, use a default script
@@ -426,7 +420,7 @@ impl ScriptManager {
         let lua = Lua::new();
         if let Err(e) = lua.load(&self.script_content).exec() {
-            log::warn!("Error loading Lua script: {}", e);
+            log::warn!("Error loading Lua script: {e}");
             return format!("{text}\n<!-- Error: Failed to load Lua script -->");
         }
@@ -436,13 +430,13 @@ impl ScriptManager {
                 match enhance_func.call::<_, String>((text, response_type, path, token)) {
                     Ok(result) => result,
                     Err(e) => {
-                        log::warn!("Error calling Lua function enhance_response: {}", e);
+                        log::warn!("Error calling Lua function enhance_response: {e}");
                         format!("{text}\n<!-- Error calling Lua enhance_response -->")
                     }
                 }
             }
             Err(e) => {
-                log::warn!("Lua enhance_response function not found: {}", e);
+                log::warn!("Lua enhance_response function not found: {e}");
                 format!("{text}\n<!-- Lua enhance_response function not found -->")
             }
         }
@@ -460,16 +454,16 @@ async fn handle_connection(
     let peer_addr = match stream.peer_addr() {
         Ok(addr) => addr.ip(),
         Err(e) => {
-            log::debug!("Failed to get peer address: {}", e);
+            log::debug!("Failed to get peer address: {e}");
             return;
         }
     };
-    log::debug!("New connection from: {}", peer_addr);
+    log::debug!("New connection from: {peer_addr}");
     // Check if IP is already blocked
     if state.read().await.blocked.contains(&peer_addr) {
-        log::debug!("Rejected connection from blocked IP: {}", peer_addr);
+        log::debug!("Rejected connection from blocked IP: {peer_addr}");
         let _ = stream.shutdown().await;
         return;
     }
@@ -492,7 +486,7 @@ async fn handle_connection(
                 }
             }
             Err(e) => {
-                log::debug!("Error reading from stream: {}", e);
+                log::debug!("Error reading from stream: {e}");
                 break;
             }
         }
@@ -504,7 +498,7 @@ async fn handle_connection(
     tokio::select! {
         () = read_fut => {},
         () = timeout_fut => {
-            log::debug!("Connection timeout from: {}", peer_addr);
+            log::debug!("Connection timeout from: {peer_addr}");
             let _ = stream.shutdown().await;
             return;
         }
@@ -515,7 +509,7 @@ async fn handle_connection(
     let request_lines: Vec<&str> = request_str.lines().collect();
     if request_lines.is_empty() {
-        log::debug!("Empty request from: {}", peer_addr);
+        log::debug!("Empty request from: {peer_addr}");
         let _ = stream.shutdown().await;
         return;
     }
@@ -533,11 +527,7 @@ async fn handle_connection(
     let protocol = request_parts[2];
     log::debug!(
-        "Request: {} {} {} from {}",
-        method,
-        path,
-        protocol,
-        peer_addr
+        "Request: {method} {path} {protocol} from {peer_addr}"
     );
     // Parse headers
@@ -564,11 +554,7 @@ async fn handle_connection(
     if should_tarpit {
         log::info!(
-            "Tarpit triggered: {} {} from {} (UA: {})",
-            method,
-            path,
-            peer_addr,
-            user_agent
+            "Tarpit triggered: {method} {path} from {peer_addr} (UA: {user_agent})"
         );
         // Update metrics
@@ -584,11 +570,11 @@ async fn handle_connection(
         *state.hits.entry(peer_addr).or_insert(0) += 1;
         let hit_count = state.hits[&peer_addr];
-        log::debug!("Hit count for {}: {}", peer_addr, hit_count);
+        log::debug!("Hit count for {peer_addr}: {hit_count}");
         // Block IPs that hit tarpits too many times
         if hit_count >= config.block_threshold && !state.blocked.contains(&peer_addr) {
-            log::info!("Blocking IP {} after {} hits", peer_addr, hit_count);
+            log::info!("Blocking IP {peer_addr} after {hit_count} hits");
             state.blocked.insert(peer_addr);
             BLOCKED_IPS.set(state.blocked.len() as f64);
             state.save_to_disk();
@@ -596,7 +582,7 @@ async fn handle_connection(
             // Try to add to firewall
             let peer_addr_str = peer_addr.to_string();
             tokio::spawn(async move {
-                log::debug!("Adding IP {} to firewall blacklist", peer_addr_str);
+                log::debug!("Adding IP {peer_addr_str} to firewall blacklist");
                 match Command::new("nft")
                     .args([
                         "add",
@@ -621,7 +607,7 @@ async fn handle_connection(
                         }
                     }
                     Err(e) => {
-                        log::warn!("Failed to execute nft command: {}", e);
+                        log::warn!("Failed to execute nft command: {e}");
                     }
                 }
             });
@@ -645,7 +631,7 @@ async fn handle_connection(
         )
         .await;
     } else {
-        log::debug!("Proxying request: {} {} from {}", method, path, peer_addr);
+        log::debug!("Proxying request: {method} {path} from {peer_addr}");
         // Proxy non-matching requests to the actual backend
         proxy_to_backend(
@@ -666,7 +652,7 @@ async fn should_tarpit(path: &str, ip: &IpAddr, config: &Config) -> bool {
     for network_str in &config.whitelist_networks {
         if let Ok(network) = network_str.parse::<IpNetwork>() {
             if network.contains(*ip) {
-                log::debug!("IP {} is in whitelist network {}", ip, network_str);
+                log::debug!("IP {ip} is in whitelist network {network_str}");
                 return false;
             }
         }
@@ -675,7 +661,7 @@ async fn should_tarpit(path: &str, ip: &IpAddr, config: &Config) -> bool {
     // Check if the request path matches any of our trap patterns
     for pattern in &config.trap_patterns {
         if path.contains(pattern) {
-            log::debug!("Path '{}' matches trap pattern '{}'", path, pattern);
+            log::debug!("Path '{path}' matches trap pattern '{pattern}'");
             return true;
         }
     }
@@ -702,7 +688,7 @@ async fn generate_deceptive_response(
         "generic"
     };
-    log::debug!("Generating {} response for path: {}", response_type, path);
+    log::debug!("Generating {response_type} response for path: {path}");
     // Generate tracking token for this interaction
     let tracking_token = format!(
@@ -765,9 +751,7 @@ async fn tarpit_connection(
         let elapsed_secs = start_time.elapsed().as_secs();
         if elapsed_secs > max_tarpit_time {
             log::info!(
-                "Tarpit maximum time ({} sec) reached for {}",
-                max_tarpit_time,
-                peer_addr
+                "Tarpit maximum time ({max_tarpit_time} sec) reached for {peer_addr}"
             );
             break;
         }
@@ -784,12 +768,12 @@ async fn tarpit_connection(
         // Try to write chunk
         if stream.write_all(chunk.as_bytes()).await.is_err() {
-            log::debug!("Connection closed by client during tarpit: {}", peer_addr);
+            log::debug!("Connection closed by client during tarpit: {peer_addr}");
             break;
         }
         if stream.flush().await.is_err() {
-            log::debug!("Failed to flush stream during tarpit: {}", peer_addr);
+            log::debug!("Failed to flush stream during tarpit: {peer_addr}");
             break;
         }
@@ -833,13 +817,13 @@ async fn proxy_to_backend(
     let server_stream = match TcpStream::connect(backend_addr).await {
         Ok(stream) => stream,
         Err(e) => {
-            log::warn!("Failed to connect to backend {}: {}", backend_addr, e);
+            log::warn!("Failed to connect to backend {backend_addr}: {e}");
             let _ = client_stream.shutdown().await;
             return;
         }
     };
-    log::debug!("Connected to backend server at {}", backend_addr);
+    log::debug!("Connected to backend server at {backend_addr}");
     // Forward the original request
     let mut request = format!("{method} {path} {protocol}\r\n");
@@ -877,7 +861,7 @@ async fn proxy_to_backend(
             }
         }
-        log::debug!("Client -> Server: forwarded {} bytes", bytes_forwarded);
+        log::debug!("Client -> Server: forwarded {bytes_forwarded} bytes");
     };
     // Server -> Client
@@ -898,7 +882,7 @@ async fn proxy_to_backend(
             }
         }
-        log::debug!("Server -> Client: forwarded {} bytes", bytes_forwarded);
+        log::debug!("Server -> Client: forwarded {bytes_forwarded} bytes");
     };
     // Run both directions concurrently
@@ -917,11 +901,11 @@ async fn metrics_handler(_req: HttpRequest) -> HttpResponse {
     let mut buffer = Vec::new();
     match encoder.encode(&prometheus::gather(), &mut buffer) {
-        Ok(_) => {
+        Ok(()) => {
             log::debug!("Metrics requested, returned {} bytes", buffer.len());
         }
         Err(e) => {
-            log::error!("Error encoding metrics: {}", e);
+            log::error!("Error encoding metrics: {e}");
         }
     }
@@ -978,12 +962,12 @@ async fn setup_firewall() -> Result<(), String> {
                     .await;
                 if let Err(e) = result {
-                    return Err(format!("Failed to create nftables table: {}", e));
+                    return Err(format!("Failed to create nftables table: {e}"));
                 }
             }
         }
         Err(e) => {
-            log::warn!("Failed to check if nftables table exists: {}", e);
+            log::warn!("Failed to check if nftables table exists: {e}");
             log::info!("Will try to create it anyway");
             let result = Command::new("nft")
                 .args(["create", "table", "inet", "filter"])
@@ -991,7 +975,7 @@ async fn setup_firewall() -> Result<(), String> {
                 .await;
             if let Err(e) = result {
-                return Err(format!("Failed to create nftables table: {}", e));
+                return Err(format!("Failed to create nftables table: {e}"));
             }
         }
     }
@@ -1019,13 +1003,13 @@ async fn setup_firewall() -> Result<(), String> {
                     .await;
                 if let Err(e) = result {
-                    return Err(format!("Failed to create blacklist set: {}", e));
+                    return Err(format!("Failed to create blacklist set: {e}"));
                 }
             }
         }
         Err(e) => {
-            log::warn!("Failed to check if blacklist set exists: {}", e);
-            return Err(format!("Failed to check if blacklist set exists: {}", e));
+            log::warn!("Failed to check if blacklist set exists: {e}");
+            return Err(format!("Failed to check if blacklist set exists: {e}"));
         }
     }
@@ -1058,13 +1042,13 @@ async fn setup_firewall() -> Result<(), String> {
                     .await;
                 if let Err(e) = result {
-                    return Err(format!("Failed to add firewall rule: {}", e));
+                    return Err(format!("Failed to add firewall rule: {e}"));
                 }
             }
         }
         Err(e) => {
-            log::warn!("Failed to check if firewall rule exists: {}", e);
-            return Err(format!("Failed to check if firewall rule exists: {}", e));
+            log::warn!("Failed to check if firewall rule exists: {e}");
+            return Err(format!("Failed to check if firewall rule exists: {e}"));
         }
     }
@@ -1086,14 +1070,14 @@ async fn main() -> std::io::Result<()> {
     // Load configuration
     let config = if let Some(config_path) = &args.config_file {
-        log::info!("Loading configuration from {:?}", config_path);
+        log::info!("Loading configuration from {config_path:?}");
         match Config::load_from_file(config_path) {
             Ok(cfg) => {
                 log::info!("Configuration loaded successfully");
                 cfg
             }
             Err(e) => {
-                log::warn!("Failed to load configuration file: {}", e);
+                log::warn!("Failed to load configuration file: {e}");
                 log::info!("Using configuration from command-line arguments");
                 Config::from_args(&args)
             }
@@ -1105,9 +1089,9 @@ async fn main() -> std::io::Result<()> {
     // Ensure required directories exist
     match config.ensure_dirs_exist() {
-        Ok(_) => log::info!("Directory setup completed"),
+        Ok(()) => log::info!("Directory setup completed"),
         Err(e) => {
-            log::error!("Failed to create required directories: {}", e);
+            log::error!("Failed to create required directories: {e}");
             log::info!("Will continue with default in-memory configuration");
         }
     }
@@ -1115,14 +1099,14 @@ async fn main() -> std::io::Result<()> {
     // Save config for reference if it was loaded from command line
     if args.config_file.is_none() {
         if let Err(e) = fs::create_dir_all(&config.config_dir) {
-            log::warn!("Failed to create config directory: {}", e);
+            log::warn!("Failed to create config directory: {e}");
         } else {
             let config_path = Path::new(&config.config_dir).join("config.json");
             if !config_path.exists() {
                 if let Err(e) = config.save_to_file(&config_path) {
-                    log::warn!("Failed to save default configuration: {}", e);
+                    log::warn!("Failed to save default configuration: {e}");
                 } else {
-                    log::info!("Saved default configuration to {:?}", config_path);
+                    log::info!("Saved default configuration to {config_path:?}");
                 }
             }
         }
@@ -1139,9 +1123,9 @@ async fn main() -> std::io::Result<()> {
     // Setup firewall rules for IP blocking
     match setup_firewall().await {
-        Ok(_) => {}
+        Ok(()) => {}
         Err(e) => {
-            log::warn!("Failed to set up firewall rules: {}", e);
+            log::warn!("Failed to set up firewall rules: {e}");
             log::info!("IP blocking will be managed in memory only");
         }
     }
@@ -1183,7 +1167,7 @@ async fn main() -> std::io::Result<()> {
         loop {
             match listener.accept().await {
                 Ok((stream, addr)) => {
-                    log::debug!("Accepted connection from {}", addr);
+                    log::debug!("Accepted connection from {addr}");
                     let state_clone = tarpit_state.clone();
                     let markov_clone = markov_generator.clone();
@@ -1202,7 +1186,7 @@ async fn main() -> std::io::Result<()> {
                     });
                 }
                 Err(e) => {
-                    log::error!("Error accepting connection: {}", e);
+                    log::error!("Error accepting connection: {e}");
                 }
             }
         }
@@ -1213,7 +1197,7 @@ async fn main() -> std::io::Result<()> {
     // Start the metrics server with actix_web
     let metrics_addr = format!("0.0.0.0:{}", metrics_config.metrics_port);
-    log::info!("Starting metrics server on {}", metrics_addr);
+    log::info!("Starting metrics server on {metrics_addr}");
     let metrics_server = HttpServer::new(move || {
         App::new()
@@ -1231,29 +1215,29 @@ async fn main() -> std::io::Result<()> {
     let metrics_server = match metrics_server {
         Ok(server) => server.run(),
         Err(e) => {
-            log::error!("Failed to bind metrics server to {}: {}", metrics_addr, e);
+            log::error!("Failed to bind metrics server to {metrics_addr}: {e}");
             return Err(e);
         }
     };
-    log::info!("Metrics server listening on {}", metrics_addr);
+    log::info!("Metrics server listening on {metrics_addr}");
     // Run both servers concurrently
     tokio::select! {
         result = tarpit_server => match result {
             Ok(Ok(())) => Ok(()),
             Ok(Err(e)) => {
-                log::error!("Tarpit server error: {}", e);
+                log::error!("Tarpit server error: {e}");
                 Err(std::io::Error::new(std::io::ErrorKind::Other, e))
             },
             Err(e) => {
-                log::error!("Tarpit server task error: {}", e);
+                log::error!("Tarpit server task error: {e}");
                 Err(std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))
             },
         },
         result = metrics_server => {
             if let Err(ref e) = result {
-                log::error!("Metrics server error: {}", e);
+                log::error!("Metrics server error: {e}");
             }
             result
         },