Compare commits

..

10 commits

Author SHA1 Message Date
6ebe027401
nix: include contrib dir in packaging
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ie19e1d0f147dd98d15f71adc16ca691f6a6a6964
2026-03-02 00:24:54 +03:00
83ae044fd3
various: extract BotStat and error types into library crate
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I09cf5713683831154292cd59fdd2c7596a6a6964
2026-03-01 21:16:37 +03:00
ad6a992aa6
flake: bump inputs
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I8bb614421afee8673158995651214fcc6a6a6964
2026-03-01 21:16:36 +03:00
ead95bcbc9
chore: bump dependencies; set MSRV to 1.91.0
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I9cc660c16187d07ee21f0a229b9fd1036a6a6964
2026-03-01 21:16:25 +03:00
69917a9247
docs: use mermaidjs for visual graphs
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ib7ae49f209214fc428f6e9bfc5c7d9176a6a6964
2026-03-01 00:36:47 +03:00
dd7e41eb64
nix: clean up devshell
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ia844e89f1450cce1625c57c9d81279706a6a6964
2026-03-01 00:36:46 +03:00
8aa39cfb1a
eris: add more default scan paths to honeypot
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I10c0129340d517587905a97a034f01406a6a6964
2026-03-01 00:36:45 +03:00
57b739ddbe
nix: switch to crane for incremental builds
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ia5ab2f512ffac20722966b605d7eaf156a6a6964
2026-03-01 00:36:44 +03:00
150f632fb8
chore: bump dependencies
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ia2537b41147373d94e08325e8540bf906a6a6964
2026-03-01 00:36:43 +03:00
10c523ab89
nix: bump inputs
Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I704ac50f34324d22d6ea86329f709e9d6a6a6964
2026-03-01 00:36:42 +03:00
11 changed files with 1323 additions and 700 deletions

1223
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,21 +1,33 @@
[package] [package]
name = "eris" name = "eris"
version = "0.1.0" description = "Sophisticated HTTP tarpit and honeypot stream"
authors = ["NotAShelf <raf@notashelf.dev>"]
version = "0.2.0"
edition = "2024" edition = "2024"
rust-version = "1.91.0"
[dependencies] [dependencies]
actix-web = "4.3.1" actix-web = "4.13.0"
clap = { version = "4.3", features = ["derive"] } clap = { version = "4.5.60", features = ["derive"] }
chrono = "0.4.24" chrono = "0.4.44"
futures = "0.3.28" futures = "0.3.32"
ipnetwork = "0.21.1" ipnetwork = "0.21.1"
lazy_static = "1.4.0" lazy_static = "1.5.0"
prometheus = "0.14.0" prometheus = "0.14.0"
prometheus_exporter = "0.8.5" prometheus_exporter = "0.8.5"
rand = "0.9.1" rand = "0.10.0"
rlua = "0.20.1" rlua = "0.20.1"
serde = { version = "1.0.162", features = ["derive"] } serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.96" serde_json = "1.0.149"
tokio = { version = "1.28.0", features = ["full"] } tokio = { version = "1.49.0", features = ["full"] }
log = "0.4.27" log = "0.4.29"
env_logger = "0.11.8" env_logger = "0.11.9"
thiserror = "2.0.18"
[lib]
name = "eris"
path = "src/lib.rs"
[[bin]]
name = "eris"
path = "src/main.rs"

View file

@ -81,8 +81,10 @@ Pre-built binaries are not yet available.
For static sites served by Nginx, the proper setup is to place Eris in front of For static sites served by Nginx, the proper setup is to place Eris in front of
Nginx. Here is a graph of how it's meant to be configured: Nginx. Here is a graph of how it's meant to be configured:
``` ```mermaid
Internet → [Eris (port 80)] → [Nginx (local port)] graph LR
A[Internet] --> B[Eris (port 80)]
B --> C[Nginx (local port)]
``` ```
You will want to configure Eris to listen on port 80 (or 443 for SSL) and You will want to configure Eris to listen on port 80 (or 443 for SSL) and
@ -132,8 +134,11 @@ eris --listen-addr 0.0.0.0:443 --backend-addr 127.0.0.1:8080 --ssl-cert /path/to
### Option 2: Use a separate SSL terminator ### Option 2: Use a separate SSL terminator
``` ```mermaid
Internet → [SSL Terminator (port 443)] → [Eris (local port)] → [Nginx (local port)] graph LR
A[Internet] --> B[SSL Terminator (port 443)]
B --> C[Eris (local port)]
C --> D[Nginx (local port)]
``` ```
You can use Nginx, HAProxy, or Caddy as the SSL terminator, forwarding decrypted You can use Nginx, HAProxy, or Caddy as the SSL terminator, forwarding decrypted

22
flake.lock generated
View file

@ -1,12 +1,27 @@
{ {
"nodes": { "nodes": {
"crane": {
"locked": {
"lastModified": 1772080396,
"narHash": "sha256-84W9UNtSk9DNMh43WBkOjpkbfODlmg+RDi854PnNgLE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "8525580bc0316c39dbfa18bd09a1331e98c9e463",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1745930157, "lastModified": 1772198003,
"narHash": "sha256-y3h3NLnzRSiUkYpnfvnS669zWZLoqqI6NprtLQ+5dck=", "narHash": "sha256-I45esRSssFtJ8p/gLHUZ1OUaaTaVLluNkABkk6arQwE=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "46e634be05ce9dc6d4db8e664515ba10b78151ae", "rev": "dd9b079222d43e1943b6ebd802f04fd959dc8e61",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -18,6 +33,7 @@
}, },
"root": { "root": {
"inputs": { "inputs": {
"crane": "crane",
"nixpkgs": "nixpkgs" "nixpkgs": "nixpkgs"
} }
} }

View file

@ -1,11 +1,15 @@
{ {
inputs.nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
crane.url = "github:ipetkov/crane";
};
outputs = { outputs = {
self, self,
nixpkgs, nixpkgs,
crane,
}: let }: let
systems = ["x86_64-linux"]; systems = ["x86_64-linux" "aarch64-linux"];
forEachSystem = nixpkgs.lib.genAttrs systems; forEachSystem = nixpkgs.lib.genAttrs systems;
pkgsForEach = nixpkgs.legacyPackages; pkgsForEach = nixpkgs.legacyPackages;
in { in {
@ -14,8 +18,10 @@
default = self.nixosModules.eris; default = self.nixosModules.eris;
}; };
packages = forEachSystem (system: { packages = forEachSystem (system: let
eris = pkgsForEach.${system}.callPackage ./nix/package.nix {}; craneLib = crane.mkLib pkgsForEach.${system};
in {
eris = pkgsForEach.${system}.callPackage ./nix/package.nix {inherit craneLib;};
default = self.packages.${system}.eris; default = self.packages.${system}.eris;
}); });

View file

@ -1,39 +1,50 @@
{ {
lib, lib,
rustPlatform, craneLib,
pkg-config,
openssl,
}: let }: let
fs = lib.fileset;
lockfile = ../Cargo.lock;
cargoToml = ../Cargo.toml;
in
rustPlatform.buildRustPackage {
pname = "eris"; pname = "eris";
version = "0.0.1"; inherit ((lib.importTOML ../Cargo.toml).package) version;
src = let src = let
fs = lib.fileset;
s = ../.; s = ../.;
in in
fs.toSource { fs.toSource {
root = s; root = s;
fileset = fs.unions [ fileset = fs.unions [
(fs.fileFilter (file: builtins.any file.hasExt ["rs"]) (s + /src)) (fs.fileFilter (file: builtins.any file.hasExt ["rs"]) (s + /src))
(s + /contrib) (s + /Cargo.lock)
lockfile (s + /Cargo.toml)
cargoToml
]; ];
}; };
postInstall = '' cargoArtifacts = craneLib.buildDepsOnly {
mkdir -p $out/share/contrib name = "eris";
cp -rv $src/contrib/corpus $out/share/contrib inherit src;
cp -rv $src/contrib/lua $out/share/contrib
'';
cargoLock.lockFile = lockfile; strictDeps = true;
nativeBuildInputs = [pkg-config];
buildInputs = [openssl];
};
in
craneLib.buildPackage {
inherit pname src version cargoArtifacts;
strictDeps = true;
# FIXME: better provide those via wrappers...
# postFixup = ''
# mkdir -p "$out"/share/contrib/{corpus,lua}
#
# install -Dm755 ${../contrib/corpus}/*.txt $out/share/contrib/corpus
# install -Dm755 ${../contrib/lua}/*.lua $out/share/contrib/lua
# '';
meta = { meta = {
description = "Sophisticated HTTP tarpit and honeypot stream"; description = "Sophisticated HTTP tarpit and honeypot stream";
homepage = "https://git.frzn.dev/NotAShelf/eris";
maintainers = [lib.maintainers.NotAShelf];
mainProgram = "eris"; mainProgram = "eris";
}; };
} }

View file

@ -1,28 +1,26 @@
{ {
mkShell, mkShell,
rust-analyzer, rustc,
cargo,
rustfmt, rustfmt,
clippy, clippy,
cargo, taplo,
gcc, rust-analyzer-unwrapped,
openssl, rustPlatform,
pkg-config,
rustc,
}: }:
mkShell { mkShell {
name = "eris"; name = "rust";
packages = [ packages = [
rust-analyzer rustc
rustfmt cargo
(rustfmt.override {asNightly = true;})
clippy clippy
cargo cargo
gcc taplo
clippy rust-analyzer-unwrapped
rustfmt
rustc
# For TLS and friends
openssl
pkg-config
]; ];
RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";
} }

281
src/error.rs Normal file
View file

@ -0,0 +1,281 @@
use std::io;
/// Result type alias for the application
pub type Result<T> = std::result::Result<T, ErisError>;
/// Comprehensive error types for the Eris application.
///
/// Display text comes from `thiserror`'s `#[error(...)]` attributes;
/// `io::Error` converts into [`ErisError::Network`] automatically via `#[from]`.
#[derive(thiserror::Error, Debug)]
pub enum ErisError {
    /// Configuration-related errors
    #[error("Configuration error: {message}")]
    Config { message: String },
    /// Network-related errors (wraps the underlying `io::Error`)
    #[error("Network error: {0}")]
    Network(#[from] io::Error),
    /// HTTP parsing errors
    #[error("HTTP parsing error: {message}")]
    HttpParse { message: String },
    /// Firewall operation errors
    #[error("Firewall operation failed: {message}")]
    Firewall { message: String },
    /// Lua script execution errors
    #[error("Lua script error: {message}")]
    Lua { message: String },
    /// Markov chain generation errors
    #[error("Markov generation error: {message}")]
    Markov { message: String },
    /// Metrics collection errors
    #[error("Metrics error: {message}")]
    Metrics { message: String },
    /// File system errors
    #[error("File system error: {message}")]
    FileSystem { message: String },
    /// Validation errors
    #[error("Validation error: {message}")]
    Validation { message: String },
    /// IP address parsing errors
    #[error("Invalid IP address: {address}")]
    InvalidIp { address: String },
    /// Connection limit exceeded (`current` active vs. configured `max`)
    #[error("Connection limit exceeded: {current}/{max}")]
    ConnectionLimit { current: usize, max: usize },
    /// Rate limiting errors, keyed by the offending IP (string form)
    #[error("Rate limit exceeded for IP: {ip}")]
    RateLimit { ip: String },
    /// Timeout errors, naming the operation that timed out
    #[error("Operation timed out: {operation}")]
    Timeout { operation: String },
    /// Permission errors
    #[error("Permission denied: {operation}")]
    Permission { operation: String },
    /// Resource not found errors
    #[error("Resource not found: {resource}")]
    NotFound { resource: String },
    /// Generic application errors
    #[error("Application error: {message}")]
    Application { message: String },
}
impl ErisError {
    /// Build a configuration error from any string-like message.
    pub fn config<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Config { message }
    }

    /// Build an HTTP parsing error.
    pub fn http_parse<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::HttpParse { message }
    }

    /// Build a firewall operation error.
    pub fn firewall<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Firewall { message }
    }

    /// Build a Lua script error.
    pub fn lua<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Lua { message }
    }

    /// Build a Markov generation error.
    pub fn markov<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Markov { message }
    }

    /// Build a metrics error.
    pub fn metrics<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Metrics { message }
    }

    /// Build a file system error.
    pub fn filesystem<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::FileSystem { message }
    }

    /// Build a validation error.
    pub fn validation<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Validation { message }
    }

    /// Build an error for an address that failed to parse as an IP.
    pub fn invalid_ip<S: Into<String>>(address: S) -> Self {
        let address = address.into();
        Self::InvalidIp { address }
    }

    /// Build a connection-limit error from the current and maximum counts.
    #[must_use]
    pub const fn connection_limit(current: usize, max: usize) -> Self {
        Self::ConnectionLimit { current, max }
    }

    /// Build a rate-limit error for the given IP (string form).
    pub fn rate_limit<S: Into<String>>(ip: S) -> Self {
        let ip = ip.into();
        Self::RateLimit { ip }
    }

    /// Build a timeout error naming the operation that timed out.
    pub fn timeout<S: Into<String>>(operation: S) -> Self {
        let operation = operation.into();
        Self::Timeout { operation }
    }

    /// Build a permission-denied error.
    pub fn permission<S: Into<String>>(operation: S) -> Self {
        let operation = operation.into();
        Self::Permission { operation }
    }

    /// Build a not-found error for the given resource.
    pub fn not_found<S: Into<String>>(resource: S) -> Self {
        let resource = resource.into();
        Self::NotFound { resource }
    }

    /// Build a generic application error.
    pub fn application<S: Into<String>>(message: S) -> Self {
        let message = message.into();
        Self::Application { message }
    }

    /// Whether the failed operation is worth retrying (transient
    /// network/timeout/limit conditions rather than hard failures).
    #[must_use]
    pub const fn is_retryable(&self) -> bool {
        match self {
            Self::Network(_)
            | Self::Timeout { .. }
            | Self::ConnectionLimit { .. }
            | Self::RateLimit { .. } => true,
            _ => false,
        }
    }

    /// Whether this error is noisy/expected enough to log at debug level.
    #[must_use]
    pub const fn is_debug_level(&self) -> bool {
        match self {
            Self::Network(_) | Self::HttpParse { .. } | Self::RateLimit { .. } => true,
            _ => false,
        }
    }

    /// Stable category label for this error, suitable as a metrics dimension.
    /// Note that both `Network` and `InvalidIp` map to "network".
    #[must_use]
    pub const fn category(&self) -> &'static str {
        match self {
            Self::Config { .. } => "config",
            Self::Network(_) => "network",
            Self::HttpParse { .. } => "http",
            Self::Firewall { .. } => "firewall",
            Self::Lua { .. } => "lua",
            Self::Markov { .. } => "markov",
            Self::Metrics { .. } => "metrics",
            Self::FileSystem { .. } => "filesystem",
            Self::Validation { .. } => "validation",
            Self::InvalidIp { .. } => "network",
            Self::ConnectionLimit { .. } => "connection",
            Self::RateLimit { .. } => "rate_limit",
            Self::Timeout { .. } => "timeout",
            Self::Permission { .. } => "permission",
            Self::NotFound { .. } => "not_found",
            Self::Application { .. } => "application",
        }
    }
}
/// Convert from `serde_json::Error`
impl From<serde_json::Error> for ErisError {
fn from(err: serde_json::Error) -> Self {
Self::config(format!("JSON parsing error: {err}"))
}
}
/// Convert from `rlua::Error`
impl From<rlua::Error> for ErisError {
fn from(err: rlua::Error) -> Self {
Self::lua(format!("Lua execution error: {err}"))
}
}
/// Convert from `ipnetwork::IpNetworkError`
impl From<ipnetwork::IpNetworkError> for ErisError {
fn from(err: ipnetwork::IpNetworkError) -> Self {
Self::validation(format!("IP network error: {err}"))
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructor helper produces the matching variant and category label.
    #[test]
    fn test_error_creation() {
        let e = ErisError::config("Invalid port");
        assert_eq!(e.category(), "config");
        assert!(matches!(e, ErisError::Config { .. }));
    }

    /// Transient network failures retry; config errors do not.
    #[test]
    fn test_error_retryable() {
        let transient = ErisError::Network(io::Error::new(io::ErrorKind::TimedOut, "timeout"));
        assert!(transient.is_retryable());
        assert!(!ErisError::config("test").is_retryable());
    }

    /// Network noise logs at debug level; config errors do not.
    #[test]
    fn test_error_debug_level() {
        let noisy =
            ErisError::Network(io::Error::new(io::ErrorKind::ConnectionRefused, "refused"));
        assert!(noisy.is_debug_level());
        assert!(!ErisError::config("test").is_debug_level());
    }

    /// `From` impls route io errors to Network and JSON errors to Config.
    #[test]
    fn test_error_conversions() {
        let converted: ErisError =
            io::Error::new(io::ErrorKind::NotFound, "file not found").into();
        assert!(matches!(converted, ErisError::Network(_)));

        let bad_json = serde_json::from_str::<serde_json::Value>("invalid json").unwrap_err();
        let converted: ErisError = bad_json.into();
        assert!(matches!(converted, ErisError::Config { .. }));
    }
}

141
src/lib.rs Normal file
View file

@ -0,0 +1,141 @@
//! Eris - Sophisticated HTTP tarpit and honeypot
use std::collections::{HashMap, HashSet};
use std::net::IpAddr;
pub mod error;
pub mod markov;
pub mod metrics;
// Re-export commonly used types
pub use error::{ErisError, Result};
pub use markov::MarkovGenerator;
pub use metrics::{
ACTIVE_CONNECTIONS, BLOCKED_IPS, HITS_COUNTER, PATH_HITS, UA_HITS, metrics_handler,
status_handler,
};
/// State of bots/IPs hitting the honeypot
#[derive(Clone, Debug)]
pub struct BotState {
    /// Hit count accumulated per source IP.
    pub hits: HashMap<IpAddr, u32>,
    /// IPs that have been blocked (persisted to `{data_dir}/blocked_ips.txt`).
    pub blocked: HashSet<IpAddr>,
    /// IPs with a connection currently open against the honeypot.
    pub active_connections: HashSet<IpAddr>,
    /// Directory for persistent state (blocklist).
    pub data_dir: String,
    /// Directory for regenerable state (hit-counter cache).
    pub cache_dir: String,
}
impl BotState {
    /// Construct an empty state rooted at the given data and cache directories.
    #[must_use]
    pub fn new(data_dir: &str, cache_dir: &str) -> Self {
        Self {
            hits: HashMap::new(),
            blocked: HashSet::new(),
            active_connections: HashSet::new(),
            data_dir: data_dir.to_owned(),
            cache_dir: cache_dir.to_owned(),
        }
    }

    /// Load previous state from disk: the persistent blocklist from
    /// `data_dir` and, if present, cached hit counters from `cache_dir`.
    /// Unparseable lines/keys are silently skipped.
    #[must_use]
    pub fn load_from_disk(data_dir: &str, cache_dir: &str) -> Self {
        let mut state = Self::new(data_dir, cache_dir);

        // Restore the blocklist (one IP per line).
        let blocked_ips_file = format!("{data_dir}/blocked_ips.txt");
        match std::fs::read_to_string(&blocked_ips_file) {
            Ok(content) => {
                // Count every successfully parsed line, even duplicates,
                // matching the previous behavior of the loader.
                let loaded = content
                    .lines()
                    .filter_map(|line| line.parse::<std::net::IpAddr>().ok())
                    .map(|ip| {
                        state.blocked.insert(ip);
                    })
                    .count();
                log::info!("Loaded {loaded} blocked IPs from {blocked_ips_file}");
            }
            Err(_) => {
                log::info!("No blocked IPs file found at {blocked_ips_file}");
            }
        }

        // Check for temporary hit counter cache
        let hit_cache_file = format!("{cache_dir}/hit_counters.json");
        if let Ok(content) = std::fs::read_to_string(&hit_cache_file)
            && let Ok(hit_map) =
                serde_json::from_str::<std::collections::HashMap<String, u32>>(&content)
        {
            state.hits.extend(hit_map.into_iter().filter_map(|(ip_str, count)| {
                ip_str.parse::<std::net::IpAddr>().ok().map(|ip| (ip, count))
            }));
            log::info!("Loaded hit counters for {} IPs", state.hits.len());
        }

        // Keep the Prometheus gauge in sync with the restored blocklist.
        BLOCKED_IPS.set(state.blocked.len() as f64);
        state
    }

    /// Persist state to disk for later reloading. Failures are logged and
    /// abort the remaining steps; nothing is returned to the caller.
    pub fn save_to_disk(&self) {
        use std::io::Write;

        // Save blocked IPs
        if let Err(e) = std::fs::create_dir_all(&self.data_dir) {
            log::error!("Failed to create data directory: {e}");
            return;
        }
        let blocked_ips_file = format!("{}/blocked_ips.txt", self.data_dir);
        match std::fs::File::create(&blocked_ips_file) {
            Ok(mut file) => {
                // Count only the lines that were actually written.
                let mut count = 0;
                for ip in &self.blocked {
                    if writeln!(file, "{ip}").is_ok() {
                        count += 1;
                    }
                }
                log::info!("Saved {count} blocked IPs to {blocked_ips_file}");
            }
            Err(e) => {
                log::error!("Failed to create blocked IPs file: {e}");
            }
        }

        // Save hit counters to cache
        if let Err(e) = std::fs::create_dir_all(&self.cache_dir) {
            log::error!("Failed to create cache directory: {e}");
            return;
        }
        let hit_cache_file = format!("{}/hit_counters.json", self.cache_dir);
        // JSON keys must be strings, so serialize IPs in display form.
        let hit_map: std::collections::HashMap<String, u32> = self
            .hits
            .iter()
            .map(|(ip, count)| (ip.to_string(), *count))
            .collect();
        match std::fs::File::create(&hit_cache_file) {
            Ok(file) => {
                if let Err(e) = serde_json::to_writer(file, &hit_map) {
                    log::error!("Failed to write hit counters to cache: {e}");
                } else {
                    log::debug!("Saved hit counters for {} IPs to cache", hit_map.len());
                }
            }
            Err(e) => {
                log::error!("Failed to create hit counter cache file: {e}");
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test: the crate's re-exported types are importable and
    /// constructible from outside their defining modules.
    #[test]
    fn test_library_imports() {
        let _error = ErisError::config("test");
        let _ok: Result<()> = Ok(());
        let _generator = MarkovGenerator::new("./test_corpora");
    }
}

View file

@ -1,12 +1,13 @@
use actix_web::{App, HttpResponse, HttpServer, web}; use actix_web::{App, HttpResponse, HttpServer, web};
use clap::Parser; use clap::Parser;
use eris::{BotState, ErisError, MarkovGenerator, Result};
use ipnetwork::IpNetwork; use ipnetwork::IpNetwork;
use rlua::{Function, Lua}; use rlua::{Function, Lua};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet}; use std::collections::HashMap;
use std::env; use std::env;
use std::fs; use std::fs;
use std::io::Write;
use std::net::IpAddr; use std::net::IpAddr;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
@ -17,11 +18,8 @@ use tokio::process::Command;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tokio::time::sleep; use tokio::time::sleep;
mod markov; // Import metrics from the metrics module
mod metrics; use eris::{
use markov::MarkovGenerator;
use metrics::{
ACTIVE_CONNECTIONS, BLOCKED_IPS, HITS_COUNTER, PATH_HITS, UA_HITS, metrics_handler, ACTIVE_CONNECTIONS, BLOCKED_IPS, HITS_COUNTER, PATH_HITS, UA_HITS, metrics_handler,
status_handler, status_handler,
}; };
@ -149,6 +147,8 @@ impl Default for Config {
"/config".to_string(), "/config".to_string(),
"/api/".to_string(), "/api/".to_string(),
"/actuator/".to_string(), "/actuator/".to_string(),
"/search/feedback".to_string(),
"/wp-json/v1/u".to_string(),
], ],
whitelist_networks: vec![ whitelist_networks: vec![
"192.168.0.0/16".to_string(), "192.168.0.0/16".to_string(),
@ -268,114 +268,6 @@ impl Config {
} }
} }
// State of bots/IPs hitting the honeypot
#[derive(Clone, Debug)]
struct BotState {
hits: HashMap<IpAddr, u32>,
blocked: HashSet<IpAddr>,
active_connections: HashSet<IpAddr>,
data_dir: String,
cache_dir: String,
}
impl BotState {
fn new(data_dir: &str, cache_dir: &str) -> Self {
Self {
hits: HashMap::new(),
blocked: HashSet::new(),
active_connections: HashSet::new(),
data_dir: data_dir.to_string(),
cache_dir: cache_dir.to_string(),
}
}
// Load previous state from disk
fn load_from_disk(data_dir: &str, cache_dir: &str) -> Self {
let mut state = Self::new(data_dir, cache_dir);
let blocked_ips_file = format!("{data_dir}/blocked_ips.txt");
if let Ok(content) = fs::read_to_string(&blocked_ips_file) {
let mut loaded = 0;
for line in content.lines() {
if let Ok(ip) = line.parse::<IpAddr>() {
state.blocked.insert(ip);
loaded += 1;
}
}
log::info!("Loaded {loaded} blocked IPs from {blocked_ips_file}");
} else {
log::info!("No blocked IPs file found at {blocked_ips_file}");
}
// Check for temporary hit counter cache
let hit_cache_file = format!("{cache_dir}/hit_counters.json");
if let Ok(content) = fs::read_to_string(&hit_cache_file) {
if let Ok(hit_map) = serde_json::from_str::<HashMap<String, u32>>(&content) {
for (ip_str, count) in hit_map {
if let Ok(ip) = ip_str.parse::<IpAddr>() {
state.hits.insert(ip, count);
}
}
log::info!("Loaded hit counters for {} IPs", state.hits.len());
}
}
BLOCKED_IPS.set(state.blocked.len() as f64);
state
}
// Persist state to disk for later reloading
fn save_to_disk(&self) {
// Save blocked IPs
if let Err(e) = fs::create_dir_all(&self.data_dir) {
log::error!("Failed to create data directory: {e}");
return;
}
let blocked_ips_file = format!("{}/blocked_ips.txt", self.data_dir);
match fs::File::create(&blocked_ips_file) {
Ok(mut file) => {
let mut count = 0;
for ip in &self.blocked {
if writeln!(file, "{ip}").is_ok() {
count += 1;
}
}
log::info!("Saved {count} blocked IPs to {blocked_ips_file}");
}
Err(e) => {
log::error!("Failed to create blocked IPs file: {e}");
}
}
// Save hit counters to cache
if let Err(e) = fs::create_dir_all(&self.cache_dir) {
log::error!("Failed to create cache directory: {e}");
return;
}
let hit_cache_file = format!("{}/hit_counters.json", self.cache_dir);
let mut hit_map = HashMap::new();
for (ip, count) in &self.hits {
hit_map.insert(ip.to_string(), *count);
}
match fs::File::create(&hit_cache_file) {
Ok(file) => {
if let Err(e) = serde_json::to_writer(file, &hit_map) {
log::error!("Failed to write hit counters to cache: {e}");
} else {
log::debug!("Saved hit counters for {} IPs to cache", hit_map.len());
}
}
Err(e) => {
log::error!("Failed to create hit counter cache file: {e}");
}
}
}
}
// Lua scripts for response generation and customization // Lua scripts for response generation and customization
struct ScriptManager { struct ScriptManager {
script_content: String, script_content: String,
@ -392,8 +284,7 @@ impl ScriptManager {
if script_dir.exists() { if script_dir.exists() {
log::debug!("Loading Lua scripts from directory: {scripts_dir}"); log::debug!("Loading Lua scripts from directory: {scripts_dir}");
if let Ok(entries) = fs::read_dir(script_dir) { if let Ok(entries) = fs::read_dir(script_dir) {
for entry in entries { for entry in entries.flatten() {
if let Ok(entry) = entry {
let path = entry.path(); let path = entry.path();
if path.extension().and_then(|ext| ext.to_str()) == Some("lua") { if path.extension().and_then(|ext| ext.to_str()) == Some("lua") {
if let Ok(content) = fs::read_to_string(&path) { if let Ok(content) = fs::read_to_string(&path) {
@ -407,7 +298,6 @@ impl ScriptManager {
} }
} }
} }
}
} else { } else {
log::warn!("Lua scripts directory does not exist: {scripts_dir}"); log::warn!("Lua scripts directory does not exist: {scripts_dir}");
} }
@ -679,13 +569,12 @@ async fn handle_connection(
async fn should_tarpit(path: &str, ip: &IpAddr, config: &Config) -> bool { async fn should_tarpit(path: &str, ip: &IpAddr, config: &Config) -> bool {
// Don't tarpit whitelisted IPs (internal networks, etc) // Don't tarpit whitelisted IPs (internal networks, etc)
for network_str in &config.whitelist_networks { for network_str in &config.whitelist_networks {
if let Ok(network) = network_str.parse::<IpNetwork>() { if let Ok(network) = network_str.parse::<IpNetwork>()
if network.contains(*ip) { && network.contains(*ip) {
log::debug!("IP {ip} is in whitelist network {network_str}"); log::debug!("IP {ip} is in whitelist network {network_str}");
return false; return false;
} }
} }
}
// Check if the request path matches any of our trap patterns // Check if the request path matches any of our trap patterns
for pattern in &config.trap_patterns { for pattern in &config.trap_patterns {
@ -922,7 +811,7 @@ async fn proxy_to_backend(
} }
// Set up nftables firewall rules for IP blocking // Set up nftables firewall rules for IP blocking
async fn setup_firewall() -> Result<(), String> { async fn setup_firewall() -> Result<()> {
log::info!("Setting up firewall rules"); log::info!("Setting up firewall rules");
// Check if nft command exists // Check if nft command exists
@ -954,7 +843,8 @@ async fn setup_firewall() -> Result<(), String> {
.await; .await;
if let Err(e) = result { if let Err(e) = result {
return Err(format!("Failed to create nftables table: {e}")); log::error!("Failed to create nftables table: {e}");
return Err(ErisError::firewall("Failed to create nftables table"));
} }
} }
} }
@ -967,7 +857,8 @@ async fn setup_firewall() -> Result<(), String> {
.await; .await;
if let Err(e) = result { if let Err(e) = result {
return Err(format!("Failed to create nftables table: {e}")); log::error!("Failed to create nftables table: {e}");
return Err(ErisError::firewall("Failed to create nftables table"));
} }
} }
} }
@ -995,13 +886,16 @@ async fn setup_firewall() -> Result<(), String> {
.await; .await;
if let Err(e) = result { if let Err(e) = result {
return Err(format!("Failed to create blacklist set: {e}")); log::error!("Failed to create blacklist set: {e}");
return Err(ErisError::firewall("Failed to create blacklist set"));
} }
} }
} }
Err(e) => { Err(e) => {
log::warn!("Failed to check if blacklist set exists: {e}"); log::error!("Failed to check if blacklist set exists: {e}");
return Err(format!("Failed to check if blacklist set exists: {e}")); return Err(ErisError::firewall(
"Failed to check if blacklist set exists",
));
} }
} }
@ -1034,13 +928,16 @@ async fn setup_firewall() -> Result<(), String> {
.await; .await;
if let Err(e) = result { if let Err(e) = result {
return Err(format!("Failed to add firewall rule: {e}")); log::error!("Failed to add firewall rule: {e}");
return Err(ErisError::firewall("Failed to add firewall rule"));
} }
} }
} }
Err(e) => { Err(e) => {
log::warn!("Failed to check if firewall rule exists: {e}"); log::error!("Failed to check if firewall rule exists: {e}");
return Err(format!("Failed to check if firewall rule exists: {e}")); return Err(ErisError::firewall(
"Failed to check if firewall rule exists",
));
} }
} }
@ -1150,7 +1047,8 @@ async fn main() -> std::io::Result<()> {
let listener = match TcpListener::bind(&config.listen_addr).await { let listener = match TcpListener::bind(&config.listen_addr).await {
Ok(l) => l, Ok(l) => l,
Err(e) => { Err(e) => {
return Err(format!("Failed to bind to {}: {}", config.listen_addr, e)); log::error!("Failed to bind to {}: {}", config.listen_addr, e);
return Err(ErisError::config("Failed to bind to listen address"));
} }
}; };
@ -1184,7 +1082,7 @@ async fn main() -> std::io::Result<()> {
} }
#[allow(unreachable_code)] #[allow(unreachable_code)]
Ok::<(), String>(()) Ok(())
}); });
// Start the metrics server with actix_web only if metrics are not disabled // Start the metrics server with actix_web only if metrics are not disabled
@ -1225,11 +1123,11 @@ async fn main() -> std::io::Result<()> {
Ok(Ok(())) => Ok(()), Ok(Ok(())) => Ok(()),
Ok(Err(e)) => { Ok(Err(e)) => {
log::error!("Tarpit server error: {e}"); log::error!("Tarpit server error: {e}");
Err(std::io::Error::new(std::io::ErrorKind::Other, e)) Err(std::io::Error::other(e))
}, },
Err(e) => { Err(e) => {
log::error!("Tarpit server task error: {e}"); log::error!("Tarpit server task error: {e}");
Err(std::io::Error::new(std::io::ErrorKind::Other, e.to_string())) Err(std::io::Error::other(e.to_string()))
}, },
}, },
result = metrics_server => { result = metrics_server => {
@ -1245,12 +1143,11 @@ async fn main() -> std::io::Result<()> {
Ok(Ok(())) => Ok(()), Ok(Ok(())) => Ok(()),
Ok(Err(e)) => { Ok(Err(e)) => {
log::error!("Tarpit server error: {e}"); log::error!("Tarpit server error: {e}");
Err(std::io::Error::new(std::io::ErrorKind::Other, e)) Err(std::io::Error::other(e))
} }
Err(e) => { Err(e) => {
log::error!("Tarpit server task error: {e}"); log::error!("Tarpit server task error: {e}");
Err(std::io::Error::new( Err(std::io::Error::other(
std::io::ErrorKind::Other,
e.to_string(), e.to_string(),
)) ))
} }

View file

@ -90,6 +90,7 @@ pub struct MarkovGenerator {
} }
impl MarkovGenerator { impl MarkovGenerator {
#[must_use]
pub fn new(corpus_dir: &str) -> Self { pub fn new(corpus_dir: &str) -> Self {
let mut chains = HashMap::new(); let mut chains = HashMap::new();
@ -101,15 +102,14 @@ impl MarkovGenerator {
// Load corpus files if they exist // Load corpus files if they exist
let path = Path::new(corpus_dir); let path = Path::new(corpus_dir);
if path.exists() && path.is_dir() { if path.exists() && path.is_dir()
if let Ok(entries) = fs::read_dir(path) { && let Ok(entries) = fs::read_dir(path) {
for entry in entries { for entry in entries.flatten() {
if let Ok(entry) = entry {
let file_path = entry.path(); let file_path = entry.path();
if let Some(file_name) = file_path.file_stem() { if let Some(file_name) = file_path.file_stem()
if let Some(file_name_str) = file_name.to_str() { && let Some(file_name_str) = file_name.to_str()
if types.contains(&file_name_str) { && types.contains(&file_name_str)
if let Ok(content) = fs::read_to_string(&file_path) { && let Ok(content) = fs::read_to_string(&file_path) {
let mut chain = Chain::new(DEFAULT_ORDER); let mut chain = Chain::new(DEFAULT_ORDER);
for line in content.lines() { for line in content.lines() {
chain.add(line); chain.add(line);
@ -118,11 +118,6 @@ impl MarkovGenerator {
} }
} }
} }
}
}
}
}
}
// If corpus files didn't exist, initialize with some default content // If corpus files didn't exist, initialize with some default content
if chains["php_exploit"].start_states.is_empty() { if chains["php_exploit"].start_states.is_empty() {