Compare commits

..

No commits in common. "main" and "v0.1.0" have entirely different histories.

11 changed files with 700 additions and 1323 deletions

1203
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,33 +1,21 @@
[package]
name = "eris"
description = "Sophisticated HTTP tarpit and honeypot stream"
authors = ["NotAShelf <raf@notashelf.dev>"]
version = "0.2.0"
version = "0.1.0"
edition = "2024"
rust-version = "1.91.0"
[dependencies]
actix-web = "4.13.0"
clap = { version = "4.5.60", features = ["derive"] }
chrono = "0.4.44"
futures = "0.3.32"
actix-web = "4.3.1"
clap = { version = "4.3", features = ["derive"] }
chrono = "0.4.24"
futures = "0.3.28"
ipnetwork = "0.21.1"
lazy_static = "1.5.0"
lazy_static = "1.4.0"
prometheus = "0.14.0"
prometheus_exporter = "0.8.5"
rand = "0.10.0"
rand = "0.9.1"
rlua = "0.20.1"
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
tokio = { version = "1.49.0", features = ["full"] }
log = "0.4.29"
env_logger = "0.11.9"
thiserror = "2.0.18"
[lib]
name = "eris"
path = "src/lib.rs"
[[bin]]
name = "eris"
path = "src/main.rs"
serde = { version = "1.0.162", features = ["derive"] }
serde_json = "1.0.96"
tokio = { version = "1.28.0", features = ["full"] }
log = "0.4.27"
env_logger = "0.11.8"

View file

@ -81,10 +81,8 @@ Pre-built binaries are not yet available.
For static sites served by Nginx, the proper setup is to place Eris in front of
Nginx. Here is a graph of how it's meant to be configured:
```mermaid
graph LR
A[Internet] --> B[Eris (port 80)]
B --> C[Nginx (local port)]
```
Internet → [Eris (port 80)] → [Nginx (local port)]
```
You will want to configure Eris to listen on port 80 (or 443 for SSL) and
@ -134,11 +132,8 @@ eris --listen-addr 0.0.0.0:443 --backend-addr 127.0.0.1:8080 --ssl-cert /path/to
### Option 2: Use a separate SSL terminator
```mermaid
graph LR
A[Internet] --> B[SSL Terminator (port 443)]
B --> C[Eris (local port)]
C --> D[Nginx (local port)]
```
Internet → [SSL Terminator (port 443)] → [Eris (local port)] → [Nginx (local port)]
```
You can use Nginx, HAProxy, or Caddy as the SSL terminator, forwarding decrypted

22
flake.lock generated
View file

@ -1,27 +1,12 @@
{
"nodes": {
"crane": {
"locked": {
"lastModified": 1772080396,
"narHash": "sha256-84W9UNtSk9DNMh43WBkOjpkbfODlmg+RDi854PnNgLE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "8525580bc0316c39dbfa18bd09a1331e98c9e463",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1772198003,
"narHash": "sha256-I45esRSssFtJ8p/gLHUZ1OUaaTaVLluNkABkk6arQwE=",
"lastModified": 1745930157,
"narHash": "sha256-y3h3NLnzRSiUkYpnfvnS669zWZLoqqI6NprtLQ+5dck=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "dd9b079222d43e1943b6ebd802f04fd959dc8e61",
"rev": "46e634be05ce9dc6d4db8e664515ba10b78151ae",
"type": "github"
},
"original": {
@ -33,7 +18,6 @@
},
"root": {
"inputs": {
"crane": "crane",
"nixpkgs": "nixpkgs"
}
}

View file

@ -1,15 +1,11 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
crane.url = "github:ipetkov/crane";
};
inputs.nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
outputs = {
self,
nixpkgs,
crane,
}: let
systems = ["x86_64-linux" "aarch64-linux"];
systems = ["x86_64-linux"];
forEachSystem = nixpkgs.lib.genAttrs systems;
pkgsForEach = nixpkgs.legacyPackages;
in {
@ -18,10 +14,8 @@
default = self.nixosModules.eris;
};
packages = forEachSystem (system: let
craneLib = crane.mkLib pkgsForEach.${system};
in {
eris = pkgsForEach.${system}.callPackage ./nix/package.nix {inherit craneLib;};
packages = forEachSystem (system: {
eris = pkgsForEach.${system}.callPackage ./nix/package.nix {};
default = self.packages.${system}.eris;
});

View file

@ -1,50 +1,39 @@
{
lib,
craneLib,
pkg-config,
openssl,
rustPlatform,
}: let
pname = "eris";
inherit ((lib.importTOML ../Cargo.toml).package) version;
src = let
fs = lib.fileset;
s = ../.;
in
fs.toSource {
root = s;
fileset = fs.unions [
(fs.fileFilter (file: builtins.any file.hasExt ["rs"]) (s + /src))
(s + /Cargo.lock)
(s + /Cargo.toml)
];
};
fs = lib.fileset;
cargoArtifacts = craneLib.buildDepsOnly {
name = "eris";
inherit src;
strictDeps = true;
nativeBuildInputs = [pkg-config];
buildInputs = [openssl];
};
lockfile = ../Cargo.lock;
cargoToml = ../Cargo.toml;
in
craneLib.buildPackage {
inherit pname src version cargoArtifacts;
rustPlatform.buildRustPackage {
pname = "eris";
version = "0.0.1";
strictDeps = true;
src = let
s = ../.;
in
fs.toSource {
root = s;
fileset = fs.unions [
(fs.fileFilter (file: builtins.any file.hasExt ["rs"]) (s + /src))
(s + /contrib)
lockfile
cargoToml
];
};
# FIXME: better provide those via wrappers...
# postFixup = ''
# mkdir -p "$out"/share/contrib/{corpus,lua}
#
# install -Dm755 ${../contrib/corpus}/*.txt $out/share/contrib/corpus
# install -Dm755 ${../contrib/lua}/*.lua $out/share/contrib/lua
# '';
postInstall = ''
mkdir -p $out/share/contrib
cp -rv $src/contrib/corpus $out/share/contrib
cp -rv $src/contrib/lua $out/share/contrib
'';
cargoLock.lockFile = lockfile;
meta = {
description = "Sophisticated HTTP tarpit and honeypot stream";
homepage = "https://git.frzn.dev/NotAShelf/eris";
maintainers = [lib.maintainers.NotAShelf];
mainProgram = "eris";
};
}

View file

@ -1,26 +1,28 @@
{
mkShell,
rustc,
cargo,
rust-analyzer,
rustfmt,
clippy,
taplo,
rust-analyzer-unwrapped,
rustPlatform,
cargo,
gcc,
openssl,
pkg-config,
rustc,
}:
mkShell {
name = "rust";
name = "eris";
packages = [
rustc
cargo
(rustfmt.override {asNightly = true;})
rust-analyzer
rustfmt
clippy
cargo
taplo
rust-analyzer-unwrapped
];
gcc
clippy
rustfmt
rustc
RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";
# For TLS and friends
openssl
pkg-config
];
}

View file

@ -1,281 +0,0 @@
use std::io;
/// Result type alias for the application.
///
/// Shadows `std::result::Result` with `ErisError` fixed as the error type so
/// fallible functions across the crate can simply return `Result<T>`.
pub type Result<T> = std::result::Result<T, ErisError>;
/// Comprehensive error types for the Eris application.
///
/// `Display` output for each variant comes from the `thiserror::Error` derive
/// via the `#[error(...)]` attributes below; `Debug` is derived for logging.
#[derive(thiserror::Error, Debug)]
pub enum ErisError {
    /// Configuration-related errors
    #[error("Configuration error: {message}")]
    Config { message: String },
    /// Network-related errors; `#[from]` lets `std::io::Error` convert into
    /// this variant automatically (e.g. via the `?` operator).
    #[error("Network error: {0}")]
    Network(#[from] io::Error),
    /// HTTP parsing errors
    #[error("HTTP parsing error: {message}")]
    HttpParse { message: String },
    /// Firewall operation errors
    #[error("Firewall operation failed: {message}")]
    Firewall { message: String },
    /// Lua script execution errors
    #[error("Lua script error: {message}")]
    Lua { message: String },
    /// Markov chain generation errors
    #[error("Markov generation error: {message}")]
    Markov { message: String },
    /// Metrics collection errors
    #[error("Metrics error: {message}")]
    Metrics { message: String },
    /// File system errors
    #[error("File system error: {message}")]
    FileSystem { message: String },
    /// Validation errors
    #[error("Validation error: {message}")]
    Validation { message: String },
    /// IP address parsing errors
    #[error("Invalid IP address: {address}")]
    InvalidIp { address: String },
    /// Connection limit exceeded (carries the observed and maximum counts)
    #[error("Connection limit exceeded: {current}/{max}")]
    ConnectionLimit { current: usize, max: usize },
    /// Rate limiting errors
    #[error("Rate limit exceeded for IP: {ip}")]
    RateLimit { ip: String },
    /// Timeout errors
    #[error("Operation timed out: {operation}")]
    Timeout { operation: String },
    /// Permission errors
    #[error("Permission denied: {operation}")]
    Permission { operation: String },
    /// Resource not found errors
    #[error("Resource not found: {resource}")]
    NotFound { resource: String },
    /// Generic application errors
    #[error("Application error: {message}")]
    Application { message: String },
}
impl ErisError {
    /// Create a new configuration error.
    ///
    /// All constructors below accept `impl Into<String>` so callers can pass
    /// either `&str` or `String` without an explicit conversion.
    #[must_use]
    pub fn config<T: Into<String>>(message: T) -> Self {
        Self::Config {
            message: message.into(),
        }
    }
    /// Create a new HTTP parsing error.
    #[must_use]
    pub fn http_parse<T: Into<String>>(message: T) -> Self {
        Self::HttpParse {
            message: message.into(),
        }
    }
    /// Create a new firewall error.
    #[must_use]
    pub fn firewall<T: Into<String>>(message: T) -> Self {
        Self::Firewall {
            message: message.into(),
        }
    }
    /// Create a new Lua script error.
    #[must_use]
    pub fn lua<T: Into<String>>(message: T) -> Self {
        Self::Lua {
            message: message.into(),
        }
    }
    /// Create a new Markov generation error.
    #[must_use]
    pub fn markov<T: Into<String>>(message: T) -> Self {
        Self::Markov {
            message: message.into(),
        }
    }
    /// Create a new metrics error.
    #[must_use]
    pub fn metrics<T: Into<String>>(message: T) -> Self {
        Self::Metrics {
            message: message.into(),
        }
    }
    /// Create a new file system error.
    #[must_use]
    pub fn filesystem<T: Into<String>>(message: T) -> Self {
        Self::FileSystem {
            message: message.into(),
        }
    }
    /// Create a new validation error.
    #[must_use]
    pub fn validation<T: Into<String>>(message: T) -> Self {
        Self::Validation {
            message: message.into(),
        }
    }
    /// Create a new invalid IP error.
    #[must_use]
    pub fn invalid_ip<T: Into<String>>(address: T) -> Self {
        Self::InvalidIp {
            address: address.into(),
        }
    }
    /// Create a new connection limit error.
    #[must_use]
    pub const fn connection_limit(current: usize, max: usize) -> Self {
        Self::ConnectionLimit { current, max }
    }
    /// Create a new rate limit error.
    #[must_use]
    pub fn rate_limit<T: Into<String>>(ip: T) -> Self {
        Self::RateLimit { ip: ip.into() }
    }
    /// Create a new timeout error.
    #[must_use]
    pub fn timeout<T: Into<String>>(operation: T) -> Self {
        Self::Timeout {
            operation: operation.into(),
        }
    }
    /// Create a new permission error.
    #[must_use]
    pub fn permission<T: Into<String>>(operation: T) -> Self {
        Self::Permission {
            operation: operation.into(),
        }
    }
    /// Create a new not found error.
    #[must_use]
    pub fn not_found<T: Into<String>>(resource: T) -> Self {
        Self::NotFound {
            resource: resource.into(),
        }
    }
    /// Create a new application error.
    #[must_use]
    pub fn application<T: Into<String>>(message: T) -> Self {
        Self::Application {
            message: message.into(),
        }
    }
    /// Check if this is a retryable error.
    ///
    /// Transient conditions (network failures, timeouts, connection/rate
    /// limits) are worth retrying; everything else is treated as permanent.
    #[must_use]
    pub const fn is_retryable(&self) -> bool {
        matches!(
            self,
            Self::Network(_)
                | Self::Timeout { .. }
                | Self::ConnectionLimit { .. }
                | Self::RateLimit { .. }
        )
    }
    /// Check if this error should be logged at debug level.
    ///
    /// High-frequency, expected noise (dropped connections, malformed HTTP
    /// from scanners, rate-limit hits) is demoted to debug to keep logs usable.
    #[must_use]
    pub const fn is_debug_level(&self) -> bool {
        matches!(
            self,
            Self::Network(_) | Self::HttpParse { .. } | Self::RateLimit { .. }
        )
    }
    /// Get error category for metrics.
    ///
    /// Returns a stable `'static` label suitable for a metrics dimension.
    /// Note that `InvalidIp` is intentionally folded into "network".
    #[must_use]
    pub const fn category(&self) -> &'static str {
        match self {
            Self::Config { .. } => "config",
            Self::Network { .. } => "network",
            Self::HttpParse { .. } => "http",
            Self::Firewall { .. } => "firewall",
            Self::Lua { .. } => "lua",
            Self::Markov { .. } => "markov",
            Self::Metrics { .. } => "metrics",
            Self::FileSystem { .. } => "filesystem",
            Self::Validation { .. } => "validation",
            Self::InvalidIp { .. } => "network",
            Self::ConnectionLimit { .. } => "connection",
            Self::RateLimit { .. } => "rate_limit",
            Self::Timeout { .. } => "timeout",
            Self::Permission { .. } => "permission",
            Self::NotFound { .. } => "not_found",
            Self::Application { .. } => "application",
        }
    }
}
/// Convert from `serde_json::Error`.
///
/// JSON failures are folded into the `Config` variant because JSON is only
/// parsed for configuration/state files in this application.
impl From<serde_json::Error> for ErisError {
    fn from(err: serde_json::Error) -> Self {
        Self::config(format!("JSON parsing error: {err}"))
    }
}
/// Convert from `rlua::Error`.
impl From<rlua::Error> for ErisError {
    fn from(err: rlua::Error) -> Self {
        Self::lua(format!("Lua execution error: {err}"))
    }
}
/// Convert from `ipnetwork::IpNetworkError`.
///
/// Bad CIDR/network strings are surfaced as `Validation` errors.
impl From<ipnetwork::IpNetworkError> for ErisError {
    fn from(err: ipnetwork::IpNetworkError) -> Self {
        Self::validation(format!("IP network error: {err}"))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Constructor helper produces the matching variant and metrics category.
    #[test]
    fn test_error_creation() {
        let err = ErisError::config("Invalid port");
        assert!(matches!(err, ErisError::Config { .. }));
        assert_eq!(err.category(), "config");
    }
    // Network errors are retryable; config errors are not.
    #[test]
    fn test_error_retryable() {
        assert!(
            ErisError::Network(io::Error::new(io::ErrorKind::TimedOut, "timeout")).is_retryable()
        );
        assert!(!ErisError::config("test").is_retryable());
    }
    // Network errors are demoted to debug-level logging; config errors are not.
    #[test]
    fn test_error_debug_level() {
        assert!(
            ErisError::Network(io::Error::new(io::ErrorKind::ConnectionRefused, "refused"))
                .is_debug_level()
        );
        assert!(!ErisError::config("test").is_debug_level());
    }
    // `From` conversions: io::Error -> Network, serde_json::Error -> Config.
    #[test]
    fn test_error_conversions() {
        let io_err = io::Error::new(io::ErrorKind::NotFound, "file not found");
        let eris_err: ErisError = io_err.into();
        assert!(matches!(eris_err, ErisError::Network(_)));
        let json_err = serde_json::from_str::<serde_json::Value>("invalid json").unwrap_err();
        let eris_err: ErisError = json_err.into();
        assert!(matches!(eris_err, ErisError::Config { .. }));
    }
}

View file

@ -1,141 +0,0 @@
//! Eris - Sophisticated HTTP tarpit and honeypot
use std::collections::{HashMap, HashSet};
use std::net::IpAddr;
pub mod error;
pub mod markov;
pub mod metrics;
// Re-export commonly used types
pub use error::{ErisError, Result};
pub use markov::MarkovGenerator;
pub use metrics::{
ACTIVE_CONNECTIONS, BLOCKED_IPS, HITS_COUNTER, PATH_HITS, UA_HITS, metrics_handler,
status_handler,
};
/// State of bots/IPs hitting the honeypot
#[derive(Clone, Debug)]
pub struct BotState {
    /// Per-IP hit counter for requests that reached the tarpit.
    pub hits: HashMap<IpAddr, u32>,
    /// IPs that have been blocked (persisted to `<data_dir>/blocked_ips.txt`).
    pub blocked: HashSet<IpAddr>,
    /// IPs with a connection currently held open.
    pub active_connections: HashSet<IpAddr>,
    /// Directory for durable state (blocked IP list).
    pub data_dir: String,
    /// Directory for disposable state (hit-counter cache).
    pub cache_dir: String,
}
impl BotState {
    /// Create an empty state rooted at the given data/cache directories.
    #[must_use]
    pub fn new(data_dir: &str, cache_dir: &str) -> Self {
        Self {
            hits: HashMap::new(),
            blocked: HashSet::new(),
            active_connections: HashSet::new(),
            data_dir: data_dir.to_string(),
            cache_dir: cache_dir.to_string(),
        }
    }
    /// Load previous state from disk
    ///
    /// Best-effort: missing or unreadable files are logged and skipped, never
    /// treated as fatal. Also resets the `BLOCKED_IPS` gauge to match the
    /// loaded block list.
    #[must_use]
    pub fn load_from_disk(data_dir: &str, cache_dir: &str) -> Self {
        let mut state = Self::new(data_dir, cache_dir);
        let blocked_ips_file = format!("{data_dir}/blocked_ips.txt");
        if let Ok(content) = std::fs::read_to_string(&blocked_ips_file) {
            let mut loaded = 0;
            // One IP address per line; unparsable lines are silently ignored.
            for line in content.lines() {
                if let Ok(ip) = line.parse::<std::net::IpAddr>() {
                    state.blocked.insert(ip);
                    loaded += 1;
                }
            }
            log::info!("Loaded {loaded} blocked IPs from {blocked_ips_file}");
        } else {
            log::info!("No blocked IPs file found at {blocked_ips_file}");
        }
        // Check for temporary hit counter cache
        // (let-chain: proceed only if the file both reads and parses as a
        // JSON map of "ip" -> hit count).
        let hit_cache_file = format!("{cache_dir}/hit_counters.json");
        if let Ok(content) = std::fs::read_to_string(&hit_cache_file)
            && let Ok(hit_map) =
                serde_json::from_str::<std::collections::HashMap<String, u32>>(&content)
        {
            for (ip_str, count) in hit_map {
                if let Ok(ip) = ip_str.parse::<std::net::IpAddr>() {
                    state.hits.insert(ip, count);
                }
            }
            log::info!("Loaded hit counters for {} IPs", state.hits.len());
        }
        BLOCKED_IPS.set(state.blocked.len() as f64);
        state
    }
    /// Persist state to disk for later reloading
    ///
    /// Writes the blocked-IP list to `<data_dir>/blocked_ips.txt` and the hit
    /// counters to `<cache_dir>/hit_counters.json`. All failures are logged
    /// and cause an early return rather than a panic.
    pub fn save_to_disk(&self) {
        // Save blocked IPs
        if let Err(e) = std::fs::create_dir_all(&self.data_dir) {
            log::error!("Failed to create data directory: {e}");
            return;
        }
        let blocked_ips_file = format!("{}/blocked_ips.txt", self.data_dir);
        match std::fs::File::create(&blocked_ips_file) {
            Ok(mut file) => {
                let mut count = 0;
                for ip in &self.blocked {
                    // Fully-qualified write_fmt avoids needing `io::Write` in scope.
                    if std::io::Write::write_fmt(&mut file, format_args!("{ip}\n")).is_ok() {
                        count += 1;
                    }
                }
                log::info!("Saved {count} blocked IPs to {blocked_ips_file}");
            }
            Err(e) => {
                log::error!("Failed to create blocked IPs file: {e}");
            }
        }
        // Save hit counters to cache
        if let Err(e) = std::fs::create_dir_all(&self.cache_dir) {
            log::error!("Failed to create cache directory: {e}");
            return;
        }
        let hit_cache_file = format!("{}/hit_counters.json", self.cache_dir);
        // Keys are stringified because JSON object keys must be strings.
        let mut hit_map = std::collections::HashMap::new();
        for (ip, count) in &self.hits {
            hit_map.insert(ip.to_string(), *count);
        }
        match std::fs::File::create(&hit_cache_file) {
            Ok(file) => {
                if let Err(e) = serde_json::to_writer(file, &hit_map) {
                    log::error!("Failed to write hit counters to cache: {e}");
                } else {
                    log::debug!("Saved hit counters for {} IPs to cache", hit_map.len());
                }
            }
            Err(e) => {
                log::error!("Failed to create hit counter cache file: {e}");
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: the crate's re-exported types are constructible. The
    // MarkovGenerator call may touch the filesystem path given, but only to
    // check for corpus files that are absent in the test environment.
    #[test]
    fn test_library_imports() {
        // Test that we can import and use the main types
        let _err = ErisError::config("test");
        let _result: Result<()> = Ok(());
        // Test markov generator creation
        let _markov = MarkovGenerator::new("./test_corpora");
    }
}

View file

@ -1,13 +1,12 @@
use actix_web::{App, HttpResponse, HttpServer, web};
use clap::Parser;
use eris::{BotState, ErisError, MarkovGenerator, Result};
use ipnetwork::IpNetwork;
use rlua::{Function, Lua};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::env;
use std::fs;
use std::io::Write;
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::sync::Arc;
@ -18,8 +17,11 @@ use tokio::process::Command;
use tokio::sync::RwLock;
use tokio::time::sleep;
// Import metrics from the metrics module
use eris::{
mod markov;
mod metrics;
use markov::MarkovGenerator;
use metrics::{
ACTIVE_CONNECTIONS, BLOCKED_IPS, HITS_COUNTER, PATH_HITS, UA_HITS, metrics_handler,
status_handler,
};
@ -147,8 +149,6 @@ impl Default for Config {
"/config".to_string(),
"/api/".to_string(),
"/actuator/".to_string(),
"/search/feedback".to_string(),
"/wp-json/v1/u".to_string(),
],
whitelist_networks: vec![
"192.168.0.0/16".to_string(),
@ -268,6 +268,114 @@ impl Config {
}
}
// State of bots/IPs hitting the honeypot
#[derive(Clone, Debug)]
struct BotState {
hits: HashMap<IpAddr, u32>,
blocked: HashSet<IpAddr>,
active_connections: HashSet<IpAddr>,
data_dir: String,
cache_dir: String,
}
impl BotState {
fn new(data_dir: &str, cache_dir: &str) -> Self {
Self {
hits: HashMap::new(),
blocked: HashSet::new(),
active_connections: HashSet::new(),
data_dir: data_dir.to_string(),
cache_dir: cache_dir.to_string(),
}
}
// Load previous state from disk
fn load_from_disk(data_dir: &str, cache_dir: &str) -> Self {
let mut state = Self::new(data_dir, cache_dir);
let blocked_ips_file = format!("{data_dir}/blocked_ips.txt");
if let Ok(content) = fs::read_to_string(&blocked_ips_file) {
let mut loaded = 0;
for line in content.lines() {
if let Ok(ip) = line.parse::<IpAddr>() {
state.blocked.insert(ip);
loaded += 1;
}
}
log::info!("Loaded {loaded} blocked IPs from {blocked_ips_file}");
} else {
log::info!("No blocked IPs file found at {blocked_ips_file}");
}
// Check for temporary hit counter cache
let hit_cache_file = format!("{cache_dir}/hit_counters.json");
if let Ok(content) = fs::read_to_string(&hit_cache_file) {
if let Ok(hit_map) = serde_json::from_str::<HashMap<String, u32>>(&content) {
for (ip_str, count) in hit_map {
if let Ok(ip) = ip_str.parse::<IpAddr>() {
state.hits.insert(ip, count);
}
}
log::info!("Loaded hit counters for {} IPs", state.hits.len());
}
}
BLOCKED_IPS.set(state.blocked.len() as f64);
state
}
// Persist state to disk for later reloading
fn save_to_disk(&self) {
// Save blocked IPs
if let Err(e) = fs::create_dir_all(&self.data_dir) {
log::error!("Failed to create data directory: {e}");
return;
}
let blocked_ips_file = format!("{}/blocked_ips.txt", self.data_dir);
match fs::File::create(&blocked_ips_file) {
Ok(mut file) => {
let mut count = 0;
for ip in &self.blocked {
if writeln!(file, "{ip}").is_ok() {
count += 1;
}
}
log::info!("Saved {count} blocked IPs to {blocked_ips_file}");
}
Err(e) => {
log::error!("Failed to create blocked IPs file: {e}");
}
}
// Save hit counters to cache
if let Err(e) = fs::create_dir_all(&self.cache_dir) {
log::error!("Failed to create cache directory: {e}");
return;
}
let hit_cache_file = format!("{}/hit_counters.json", self.cache_dir);
let mut hit_map = HashMap::new();
for (ip, count) in &self.hits {
hit_map.insert(ip.to_string(), *count);
}
match fs::File::create(&hit_cache_file) {
Ok(file) => {
if let Err(e) = serde_json::to_writer(file, &hit_map) {
log::error!("Failed to write hit counters to cache: {e}");
} else {
log::debug!("Saved hit counters for {} IPs to cache", hit_map.len());
}
}
Err(e) => {
log::error!("Failed to create hit counter cache file: {e}");
}
}
}
}
// Lua scripts for response generation and customization
struct ScriptManager {
script_content: String,
@ -284,16 +392,18 @@ impl ScriptManager {
if script_dir.exists() {
log::debug!("Loading Lua scripts from directory: {scripts_dir}");
if let Ok(entries) = fs::read_dir(script_dir) {
for entry in entries.flatten() {
let path = entry.path();
if path.extension().and_then(|ext| ext.to_str()) == Some("lua") {
if let Ok(content) = fs::read_to_string(&path) {
log::debug!("Loaded Lua script: {}", path.display());
script_content.push_str(&content);
script_content.push('\n');
scripts_loaded = true;
} else {
log::warn!("Failed to read Lua script: {}", path.display());
for entry in entries {
if let Ok(entry) = entry {
let path = entry.path();
if path.extension().and_then(|ext| ext.to_str()) == Some("lua") {
if let Ok(content) = fs::read_to_string(&path) {
log::debug!("Loaded Lua script: {}", path.display());
script_content.push_str(&content);
script_content.push('\n');
scripts_loaded = true;
} else {
log::warn!("Failed to read Lua script: {}", path.display());
}
}
}
}
@ -569,11 +679,12 @@ async fn handle_connection(
async fn should_tarpit(path: &str, ip: &IpAddr, config: &Config) -> bool {
// Don't tarpit whitelisted IPs (internal networks, etc)
for network_str in &config.whitelist_networks {
if let Ok(network) = network_str.parse::<IpNetwork>()
&& network.contains(*ip) {
if let Ok(network) = network_str.parse::<IpNetwork>() {
if network.contains(*ip) {
log::debug!("IP {ip} is in whitelist network {network_str}");
return false;
}
}
}
// Check if the request path matches any of our trap patterns
@ -811,7 +922,7 @@ async fn proxy_to_backend(
}
// Set up nftables firewall rules for IP blocking
async fn setup_firewall() -> Result<()> {
async fn setup_firewall() -> Result<(), String> {
log::info!("Setting up firewall rules");
// Check if nft command exists
@ -843,8 +954,7 @@ async fn setup_firewall() -> Result<()> {
.await;
if let Err(e) = result {
log::error!("Failed to create nftables table: {e}");
return Err(ErisError::firewall("Failed to create nftables table"));
return Err(format!("Failed to create nftables table: {e}"));
}
}
}
@ -857,8 +967,7 @@ async fn setup_firewall() -> Result<()> {
.await;
if let Err(e) = result {
log::error!("Failed to create nftables table: {e}");
return Err(ErisError::firewall("Failed to create nftables table"));
return Err(format!("Failed to create nftables table: {e}"));
}
}
}
@ -886,16 +995,13 @@ async fn setup_firewall() -> Result<()> {
.await;
if let Err(e) = result {
log::error!("Failed to create blacklist set: {e}");
return Err(ErisError::firewall("Failed to create blacklist set"));
return Err(format!("Failed to create blacklist set: {e}"));
}
}
}
Err(e) => {
log::error!("Failed to check if blacklist set exists: {e}");
return Err(ErisError::firewall(
"Failed to check if blacklist set exists",
));
log::warn!("Failed to check if blacklist set exists: {e}");
return Err(format!("Failed to check if blacklist set exists: {e}"));
}
}
@ -928,16 +1034,13 @@ async fn setup_firewall() -> Result<()> {
.await;
if let Err(e) = result {
log::error!("Failed to add firewall rule: {e}");
return Err(ErisError::firewall("Failed to add firewall rule"));
return Err(format!("Failed to add firewall rule: {e}"));
}
}
}
Err(e) => {
log::error!("Failed to check if firewall rule exists: {e}");
return Err(ErisError::firewall(
"Failed to check if firewall rule exists",
));
log::warn!("Failed to check if firewall rule exists: {e}");
return Err(format!("Failed to check if firewall rule exists: {e}"));
}
}
@ -1047,8 +1150,7 @@ async fn main() -> std::io::Result<()> {
let listener = match TcpListener::bind(&config.listen_addr).await {
Ok(l) => l,
Err(e) => {
log::error!("Failed to bind to {}: {}", config.listen_addr, e);
return Err(ErisError::config("Failed to bind to listen address"));
return Err(format!("Failed to bind to {}: {}", config.listen_addr, e));
}
};
@ -1082,7 +1184,7 @@ async fn main() -> std::io::Result<()> {
}
#[allow(unreachable_code)]
Ok(())
Ok::<(), String>(())
});
// Start the metrics server with actix_web only if metrics are not disabled
@ -1123,11 +1225,11 @@ async fn main() -> std::io::Result<()> {
Ok(Ok(())) => Ok(()),
Ok(Err(e)) => {
log::error!("Tarpit server error: {e}");
Err(std::io::Error::other(e))
Err(std::io::Error::new(std::io::ErrorKind::Other, e))
},
Err(e) => {
log::error!("Tarpit server task error: {e}");
Err(std::io::Error::other(e.to_string()))
Err(std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))
},
},
result = metrics_server => {
@ -1143,11 +1245,12 @@ async fn main() -> std::io::Result<()> {
Ok(Ok(())) => Ok(()),
Ok(Err(e)) => {
log::error!("Tarpit server error: {e}");
Err(std::io::Error::other(e))
Err(std::io::Error::new(std::io::ErrorKind::Other, e))
}
Err(e) => {
log::error!("Tarpit server task error: {e}");
Err(std::io::Error::other(
Err(std::io::Error::new(
std::io::ErrorKind::Other,
e.to_string(),
))
}

View file

@ -90,7 +90,6 @@ pub struct MarkovGenerator {
}
impl MarkovGenerator {
#[must_use]
pub fn new(corpus_dir: &str) -> Self {
let mut chains = HashMap::new();
@ -102,22 +101,28 @@ impl MarkovGenerator {
// Load corpus files if they exist
let path = Path::new(corpus_dir);
if path.exists() && path.is_dir()
&& let Ok(entries) = fs::read_dir(path) {
for entry in entries.flatten() {
let file_path = entry.path();
if let Some(file_name) = file_path.file_stem()
&& let Some(file_name_str) = file_name.to_str()
&& types.contains(&file_name_str)
&& let Ok(content) = fs::read_to_string(&file_path) {
let mut chain = Chain::new(DEFAULT_ORDER);
for line in content.lines() {
chain.add(line);
if path.exists() && path.is_dir() {
if let Ok(entries) = fs::read_dir(path) {
for entry in entries {
if let Ok(entry) = entry {
let file_path = entry.path();
if let Some(file_name) = file_path.file_stem() {
if let Some(file_name_str) = file_name.to_str() {
if types.contains(&file_name_str) {
if let Ok(content) = fs::read_to_string(&file_path) {
let mut chain = Chain::new(DEFAULT_ORDER);
for line in content.lines() {
chain.add(line);
}
chains.insert(file_name_str.to_string(), chain);
}
chains.insert(file_name_str.to_string(), chain);
}
}
}
}
}
}
}
// If corpus files didn't exist, initialize with some default content
if chains["php_exploit"].start_states.is_empty() {