//! Diagnostic system for nftables configuration files //! //! This module provides comprehensive diagnostic capabilities including: //! - Syntax errors with precise location information //! - Semantic validation warnings //! - Style and best practice recommendations //! - Language Server Protocol (LSP) compatible output //! - JSON output for tooling integration use crate::lexer::LexError; use crate::parser::{ParseError, Parser}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::fmt; use std::net::IpAddr; use text_size::TextSize; /// Diagnostic severity levels following LSP specification #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum DiagnosticSeverity { /// Reports an error that prevents successful processing Error = 1, /// Reports a warning that should be addressed Warning = 2, /// Reports information that might be useful Information = 3, /// Reports a hint for potential improvements Hint = 4, } impl fmt::Display for DiagnosticSeverity { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { DiagnosticSeverity::Error => write!(f, "error"), DiagnosticSeverity::Warning => write!(f, "warning"), DiagnosticSeverity::Information => write!(f, "info"), DiagnosticSeverity::Hint => write!(f, "hint"), } } } /// Diagnostic codes for categorizing issues #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum DiagnosticCode { // Syntax errors SyntaxError, UnexpectedToken, MissingToken, UnterminatedString, InvalidNumber, InvalidToken, // Semantic errors UnknownTableFamily, UnknownChainType, UnknownHook, InvalidPriority, InvalidPolicy, DuplicateTableName, DuplicateChainName, UndefinedVariable, InvalidCidrNotation, InvalidPortRange, InvalidProtocol, // Style warnings MissingShebang, InconsistentIndentation, TrailingWhitespace, TooManyEmptyLines, LongLine, PreferredAlternative, // Best practices ChainWithoutPolicy, RuleWithoutAction, OverlyPermissiveRule, DuplicateRule, 
ConflictingRules, UnusedVariable, UnusedSet, DeprecatedSyntax, MissingDocumentation, SecurityRisk, // Performance InefficientRuleOrder, LargeSetWithoutTimeout, MissingCounters, // Indentation and formatting MixedIndentation, IncorrectIndentationLevel, MissingSpaceAfterComma, ExtraWhitespace, // nftables specific ChainMissingHook, InvalidTableFamily, InvalidChainPriority, MissingChainType, RedundantRule, UnnecessaryJump, } impl fmt::Display for DiagnosticCode { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let code = match self { DiagnosticCode::SyntaxError => "NFT001", DiagnosticCode::UnexpectedToken => "NFT002", DiagnosticCode::MissingToken => "NFT003", DiagnosticCode::UnterminatedString => "NFT004", DiagnosticCode::InvalidNumber => "NFT005", DiagnosticCode::InvalidToken => "NFT006", DiagnosticCode::UnknownTableFamily => "NFT101", DiagnosticCode::UnknownChainType => "NFT102", DiagnosticCode::UnknownHook => "NFT103", DiagnosticCode::InvalidPriority => "NFT104", DiagnosticCode::InvalidPolicy => "NFT105", DiagnosticCode::DuplicateTableName => "NFT106", DiagnosticCode::DuplicateChainName => "NFT107", DiagnosticCode::UndefinedVariable => "NFT108", DiagnosticCode::InvalidCidrNotation => "NFT109", DiagnosticCode::InvalidPortRange => "NFT110", DiagnosticCode::InvalidProtocol => "NFT111", DiagnosticCode::MissingShebang => "NFT201", DiagnosticCode::InconsistentIndentation => "NFT202", DiagnosticCode::TrailingWhitespace => "NFT203", DiagnosticCode::TooManyEmptyLines => "NFT204", DiagnosticCode::LongLine => "NFT205", DiagnosticCode::PreferredAlternative => "NFT206", DiagnosticCode::ChainWithoutPolicy => "NFT301", DiagnosticCode::RuleWithoutAction => "NFT302", DiagnosticCode::OverlyPermissiveRule => "NFT303", DiagnosticCode::DuplicateRule => "NFT304", DiagnosticCode::ConflictingRules => "NFT305", DiagnosticCode::UnusedVariable => "NFT306", DiagnosticCode::UnusedSet => "NFT307", DiagnosticCode::DeprecatedSyntax => "NFT308", DiagnosticCode::MissingDocumentation => 
"NFT309", DiagnosticCode::SecurityRisk => "NFT310", DiagnosticCode::InefficientRuleOrder => "NFT401", DiagnosticCode::LargeSetWithoutTimeout => "NFT402", DiagnosticCode::MissingCounters => "NFT403", DiagnosticCode::MixedIndentation => "NFT501", DiagnosticCode::IncorrectIndentationLevel => "NFT502", DiagnosticCode::MissingSpaceAfterComma => "NFT503", DiagnosticCode::ExtraWhitespace => "NFT504", DiagnosticCode::ChainMissingHook => "NFT601", DiagnosticCode::InvalidTableFamily => "NFT602", DiagnosticCode::InvalidChainPriority => "NFT603", DiagnosticCode::MissingChainType => "NFT604", DiagnosticCode::RedundantRule => "NFT605", DiagnosticCode::UnnecessaryJump => "NFT606", }; write!(f, "{}", code) } } /// Position information for diagnostics #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Position { pub line: u32, pub character: u32, } impl Position { /// Creates a new position with line and character coordinates /// /// # Parameters /// * `line` - Zero-based line number /// * `character` - Zero-based character offset in the line pub fn new(line: u32, character: u32) -> Self { Self { line, character } } /// Converts a text offset to line and character coordinates /// /// # Parameters /// * `text_size` - Byte offset in the source code /// * `source` - Complete source text to analyze for line breaks /// /// # Returns /// A position with line and character coordinates corresponding to the text offset pub fn from_text_size(text_size: TextSize, source: &str) -> Self { let mut line = 0; let mut character = 0; let offset = text_size.into(); for (i, ch) in source.char_indices() { if i >= offset { break; } if ch == '\n' { line += 1; character = 0; } else { character += 1; } } Self { line, character } } } /// Range information for diagnostics #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Range { pub start: Position, pub end: Position, } impl Range { /// Creates a new range with start and end positions /// /// # Parameters /// * 
`start` - Starting position (line and character) /// * `end` - Ending position (line and character) pub fn new(start: Position, end: Position) -> Self { Self { start, end } } /// Creates a range that covers only a single position /// /// Useful for diagnostics that point to a specific location rather than a range /// /// # Parameters /// * `position` - The position to create a single-point range for pub fn single_position(position: Position) -> Self { Self { start: position.clone(), end: position, } } } /// Related information for diagnostics #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct DiagnosticRelatedInformation { pub location: Range, pub message: String, } /// Code action that can fix a diagnostic #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct CodeAction { pub title: String, pub kind: String, pub edit: Option, } /// Text edit for code actions #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct TextEdit { pub range: Range, pub new_text: String, } /// Workspace edit containing text changes #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct WorkspaceEdit { pub changes: HashMap>, } /// A single diagnostic issue #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Diagnostic { /// The range at which the message applies pub range: Range, /// The diagnostic's severity pub severity: DiagnosticSeverity, /// The diagnostic's code pub code: DiagnosticCode, /// A human-readable string describing the source of this diagnostic pub source: String, /// The diagnostic's message pub message: String, /// Additional metadata about the diagnostic pub related_information: Vec, /// Code actions that can address this diagnostic pub code_actions: Vec, /// Tags providing additional metadata pub tags: Vec, } impl Diagnostic { /// Creates a new diagnostic with essential information /// /// # Parameters /// * `range` - The source code range the diagnostic applies to /// 
* `severity` - The severity level of the diagnostic /// * `code` - The diagnostic code indicating the type of issue /// * `message` - A human-readable description of the diagnostic /// /// # Returns /// A new diagnostic with default values for other fields pub fn new( range: Range, severity: DiagnosticSeverity, code: DiagnosticCode, message: String, ) -> Self { Self { range, severity, code, source: "nff".to_string(), message, related_information: Vec::new(), code_actions: Vec::new(), tags: Vec::new(), } } } /// Collection of diagnostics for a file #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct DiagnosticCollection { pub diagnostics: Vec, pub file_path: String, pub source_text: String, } impl DiagnosticCollection { /// Creates a new diagnostic collection for a file /// /// # Parameters /// * `file_path` - Path to the file being analyzed /// * `source_text` - The content of the file pub fn new(file_path: String, source_text: String) -> Self { Self { diagnostics: Vec::new(), file_path, source_text, } } /// Adds multiple diagnostics to the collection /// /// # Parameters /// * `diagnostics` - Vector of diagnostics to add pub fn extend(&mut self, diagnostics: Vec) { self.diagnostics.extend(diagnostics); } /// Returns an iterator over all error-level diagnostics in the collection pub fn errors(&self) -> impl Iterator { self.diagnostics .iter() .filter(|d| d.severity == DiagnosticSeverity::Error) } /// Checks if the collection contains any error-level diagnostics pub fn has_errors(&self) -> bool { self.errors().count() > 0 } /// Converts the diagnostic collection to JSON format /// /// Useful for integrating with Language Server Protocol (LSP) clients /// or other tools that consume structured diagnostic data. 
/// /// # Returns /// A JSON string representation of the diagnostic collection, or an error if serialization fails pub fn to_json(&self) -> serde_json::Result { serde_json::to_string_pretty(self) } /// Converts the diagnostic collection to a human-readable text format /// /// Produces a formatted report suitable for display in a terminal, /// with file locations, error codes, and code snippets for context. /// /// # Returns /// A formatted string containing all diagnostics with relevant context pub fn to_human_readable(&self) -> String { let mut output = String::new(); if self.diagnostics.is_empty() { output.push_str("No issues found.\n"); return output; } output.push_str(&format!( "Found {} issues in {}:\n\n", self.diagnostics.len(), self.file_path )); for diagnostic in &self.diagnostics { output.push_str(&format!( "{}:{}:{}: {}: {} [{}]\n", self.file_path, diagnostic.range.start.line + 1, diagnostic.range.start.character + 1, diagnostic.severity, diagnostic.message, diagnostic.code )); // Add code snippet context if let Some(context) = self.get_context_lines(&diagnostic.range, 2) { for line in context { output.push_str(&format!(" {}\n", line)); } output.push('\n'); } } output } /// Extracts source code lines around a diagnostic location /// /// Provides context for a diagnostic by showing the lines of code surrounding /// the issue, with line numbers and a marker pointing to the problematic line. 
/// /// # Parameters /// * `range` - The range in the source code to provide context for /// * `context_lines` - Number of lines to include before and after the range /// /// # Returns /// A vector of formatted strings containing the context lines with line numbers, /// or None if the range is invalid fn get_context_lines(&self, range: &Range, context_lines: usize) -> Option> { let lines: Vec<&str> = self.source_text.lines().collect(); let start_line = range.start.line as usize; let end_line = range.end.line as usize; if start_line >= lines.len() { return None; } let context_start = start_line.saturating_sub(context_lines); let context_end = std::cmp::min(end_line + context_lines + 1, lines.len()); let mut result = Vec::new(); for (i, line) in lines[context_start..context_end].iter().enumerate() { let line_num = context_start + i + 1; if i + context_start == start_line { result.push(format!("→ {:4}: {}", line_num, line)); } else { result.push(format!(" {:4}: {}", line_num, line)); } } Some(result) } } /// Configuration for diagnostic analysis #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DiagnosticConfig { /// Enable style warnings pub enable_style_warnings: bool, /// Enable best practice checks pub enable_best_practices: bool, /// Enable performance hints pub enable_performance_hints: bool, /// Enable security warnings pub enable_security_warnings: bool, /// Maximum line length for style checks pub max_line_length: usize, /// Maximum consecutive empty lines pub max_empty_lines: usize, /// Preferred indentation style pub preferred_indent: Option, } impl Default for DiagnosticConfig { fn default() -> Self { Self { enable_style_warnings: true, enable_best_practices: true, enable_performance_hints: true, enable_security_warnings: true, max_line_length: 120, max_empty_lines: 2, preferred_indent: Some("tabs".to_string()), } } } /// Trait for specialized diagnostic analyzers pub trait AnalyzerModule { fn analyze(&self, source: &str, config: 
&DiagnosticConfig) -> Vec; fn name(&self) -> &'static str; } /// Lexical analysis module pub struct LexicalAnalyzer; impl AnalyzerModule for LexicalAnalyzer { fn analyze(&self, source: &str, _config: &DiagnosticConfig) -> Vec { use crate::lexer::NftablesLexer; let mut diagnostics = Vec::new(); let mut lexer = NftablesLexer::new(source); match lexer.tokenize() { Ok(_) => {} Err(lex_error) => { let diagnostic = Self::lex_error_to_diagnostic(&lex_error, source); diagnostics.push(diagnostic); } } diagnostics } fn name(&self) -> &'static str { "lexical" } } impl LexicalAnalyzer { /// Converts a lexical error to a diagnostic /// /// Translates lexer-specific errors into the generic diagnostic format /// with appropriate severity, code, and location information. /// /// # Parameters /// * `error` - The lexical error to convert /// * `source` - Source code for position calculation /// /// # Returns /// A diagnostic describing the lexical error pub fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic { match error { LexError::InvalidToken { position, text } => { let pos = Position::from_text_size(TextSize::from(*position as u32), source); let range = Range::new( pos.clone(), Position::new(pos.line, pos.character + text.len() as u32), ); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::InvalidToken, format!("Invalid token: '{}'", text), ) } LexError::UnterminatedString { position } => { let pos = Position::from_text_size(TextSize::from(*position as u32), source); let range = Range::single_position(pos); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::UnterminatedString, "Unterminated string literal".to_string(), ) } LexError::InvalidNumber { position, text } => { let start_pos = Position::from_text_size(TextSize::from(*position as u32), source); let end_pos = Position::new(start_pos.line, start_pos.character + text.len() as u32); let range = Range::new(start_pos, end_pos); Diagnostic::new( range, 
DiagnosticSeverity::Error, DiagnosticCode::InvalidNumber, format!("Invalid number: '{}'", text), ) } } } } /// Syntax analysis module pub struct SyntaxAnalyzer; impl AnalyzerModule for SyntaxAnalyzer { fn analyze(&self, source: &str, _config: &DiagnosticConfig) -> Vec { use crate::lexer::NftablesLexer; let mut diagnostics = Vec::new(); let mut lexer = NftablesLexer::new(source); match lexer.tokenize() { Ok(tokens) => { let mut parser = Parser::new(tokens); match parser.parse() { Ok(_) => {} Err(parse_error) => { let diagnostic = Self::parse_error_to_diagnostic(&parse_error, source); diagnostics.push(diagnostic); } } } Err(_) => {} } diagnostics } fn name(&self) -> &'static str { "syntax" } } impl SyntaxAnalyzer { /// Converts a parse error to a diagnostic /// /// Translates parser-specific errors into the generic diagnostic format /// with appropriate severity, code, and meaningful error messages. /// /// # Parameters /// * `error` - The parse error to convert /// * `_source` - Source code for position calculation /// /// # Returns /// A diagnostic describing the syntax error fn parse_error_to_diagnostic(error: &ParseError, _source: &str) -> Diagnostic { match error { ParseError::UnexpectedToken { line, column, expected, found, } => { let pos = Position::new(*line as u32, *column as u32); let range = Range::single_position(pos); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::UnexpectedToken, format!("Expected {}, found '{}'", expected, found), ) } ParseError::MissingToken { expected } => { let range = Range::single_position(Position::new(0, 0)); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::MissingToken, format!("Missing token: expected {}", expected), ) } ParseError::InvalidExpression { message } => { let range = Range::single_position(Position::new(0, 0)); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::SyntaxError, format!("Invalid expression: {}", message), ) } ParseError::InvalidStatement { message } 
=> { let range = Range::single_position(Position::new(0, 0)); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::SyntaxError, format!("Invalid statement: {}", message), ) } ParseError::SemanticError { message } => { let range = Range::single_position(Position::new(0, 0)); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::SyntaxError, format!("Semantic error: {}", message), ) } ParseError::LexError(lex_error) => { LexicalAnalyzer::lex_error_to_diagnostic(lex_error, _source) } ParseError::AnyhowError(anyhow_error) => { let range = Range::single_position(Position::new(0, 0)); Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::SyntaxError, format!("Parse error: {}", anyhow_error), ) } } } } /// Style and formatting analysis module pub struct StyleAnalyzer; impl AnalyzerModule for StyleAnalyzer { fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec { let mut diagnostics = Vec::new(); if !config.enable_style_warnings { return diagnostics; } if !source.starts_with("#!") { let range = Range::new(Position::new(0, 0), Position::new(0, 0)); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::MissingShebang, "Consider adding a shebang line (e.g., #!/usr/sbin/nft -f)".to_string(), ); diagnostics.push(diagnostic); } diagnostics.extend(self.analyze_line_issues(source, config)); diagnostics.extend(self.analyze_whitespace_issues(source, config)); diagnostics.extend(self.analyze_indentation(source, config)); diagnostics } fn name(&self) -> &'static str { "style" } } impl StyleAnalyzer { /// Analyzes line issues in nftables configuration /// /// Detects: /// - Lines exceeding maximum length /// - Trailing whitespace at end of lines fn analyze_line_issues(&self, source: &str, config: &DiagnosticConfig) -> Vec { let mut diagnostics = Vec::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; // Long lines if line.len() > config.max_line_length { let 
start = Position::new(line_num, config.max_line_length as u32); let end = Position::new(line_num, line.len() as u32); let range = Range::new(start, end); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::LongLine, format!( "Line too long ({} > {} characters)", line.len(), config.max_line_length ), ); diagnostics.push(diagnostic); } // Trailing whitespace if line.ends_with(' ') || line.ends_with('\t') { let trimmed_len = line.trim_end().len(); let start = Position::new(line_num, trimmed_len as u32); let end = Position::new(line_num, line.len() as u32); let range = Range::new(start, end); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::TrailingWhitespace, "Trailing whitespace".to_string(), ); diagnostics.push(diagnostic); } } diagnostics } /// Analyzes whitespace issues in nftables configuration /// /// Detects: /// - Too many consecutive empty lines /// - Trailing empty lines at end of file fn analyze_whitespace_issues( &self, source: &str, config: &DiagnosticConfig, ) -> Vec { let mut diagnostics = Vec::new(); let lines: Vec<&str> = source.lines().collect(); let mut empty_count = 0; let mut empty_start = 0; for (line_idx, line) in lines.iter().enumerate() { if line.trim().is_empty() { if empty_count == 0 { empty_start = line_idx; } empty_count += 1; } else { if empty_count > config.max_empty_lines { let start = Position::new(empty_start as u32, 0); let end = Position::new((empty_start + empty_count - 1) as u32, 0); let range = Range::new(start, end); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::TooManyEmptyLines, format!( "Too many consecutive empty lines ({} > {})", empty_count, config.max_empty_lines ), ); diagnostics.push(diagnostic); } empty_count = 0; } } // Check for trailing empty lines at the end of the file if empty_count > config.max_empty_lines { let start = Position::new(empty_start as u32, 0); let end = Position::new((empty_start + 
empty_count - 1) as u32, 0); let range = Range::new(start, end); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::TooManyEmptyLines, format!( "Too many consecutive empty lines at end of file ({} > {})", empty_count, config.max_empty_lines ), ); diagnostics.push(diagnostic); } diagnostics } /// Analyzes indentation consistency in nftables configuration /// /// Checks for: /// - Mixed tabs and spaces in a single line /// - Inconsistent indentation styles across the file /// - Adherence to preferred indentation style if specified fn analyze_indentation(&self, source: &str, config: &DiagnosticConfig) -> Vec { let mut diagnostics = Vec::new(); let mut has_tabs = false; let mut has_spaces = false; for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; if line.trim().is_empty() { continue; } let leading_whitespace: String = line .chars() .take_while(|&c| c == ' ' || c == '\t') .collect(); if leading_whitespace.contains('\t') { has_tabs = true; } if leading_whitespace.contains(' ') { has_spaces = true; } // Check for mixed indentation in a single line if leading_whitespace.contains('\t') && leading_whitespace.contains(' ') { let range = Range::new( Position::new(line_num, 0), Position::new(line_num, leading_whitespace.len() as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::MixedIndentation, "Mixed tabs and spaces in indentation".to_string(), ); diagnostics.push(diagnostic); } } // Check for mixed indentation across the file if has_tabs && has_spaces { let range = Range::single_position(Position::new(0, 0)); let (severity, message) = if let Some(preferred) = &config.preferred_indent { ( DiagnosticSeverity::Information, format!("File uses mixed indentation; prefer {}", preferred), ) } else { ( DiagnosticSeverity::Warning, "File uses mixed indentation (tabs and spaces)".to_string(), ) }; let diagnostic = Diagnostic::new( range, severity, 
DiagnosticCode::InconsistentIndentation, message, ); diagnostics.push(diagnostic); } diagnostics } } /// Semantic analysis module for nftables-specific validation pub struct SemanticAnalyzer; impl AnalyzerModule for SemanticAnalyzer { fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec { let mut diagnostics = Vec::new(); diagnostics.extend(self.validate_table_declarations(source)); diagnostics.extend(self.validate_chain_declarations_semantic(source)); diagnostics.extend(self.validate_cidr_notation(source)); if config.enable_best_practices { diagnostics.extend(self.validate_chain_best_practices(source)); diagnostics.extend(self.check_for_redundant_rules(source)); } if config.enable_performance_hints { diagnostics.extend(self.check_performance_hints(source)); } if config.enable_security_warnings { diagnostics.extend(self.check_security_warnings(source)); } diagnostics } fn name(&self) -> &'static str { "semantic" } } impl SemanticAnalyzer { /// Validates table declarations in nftables configuration /// /// Checks for: /// - Valid table family (ip, ip6, inet, arp, bridge, netdev) /// - Duplicate table names fn validate_table_declarations(&self, source: &str) -> Vec { let mut diagnostics = Vec::new(); let mut seen_tables = HashSet::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; let trimmed = line.trim(); if trimmed.starts_with("table ") { let parts: Vec<&str> = trimmed.split_whitespace().collect(); if parts.len() >= 3 { let family = parts[1]; let name = parts[2]; match family { "ip" | "ip6" | "inet" | "arp" | "bridge" | "netdev" => {} _ => { let start_col = line.find(family).unwrap_or(0); let range = Range::new( Position::new(line_num, start_col as u32), Position::new(line_num, (start_col + family.len()) as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::InvalidTableFamily, format!("Unknown table family: '{}'", family), ); diagnostics.push(diagnostic); } } // Check 
for duplicate table names let table_key = format!("{}:{}", family, name); if seen_tables.contains(&table_key) { let start_col = line.find(name).unwrap_or(0); let range = Range::new( Position::new(line_num, start_col as u32), Position::new(line_num, (start_col + name.len()) as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::DuplicateTableName, format!("Duplicate table name: '{}'", name), ); diagnostics.push(diagnostic); } else { seen_tables.insert(table_key); } } } } diagnostics } /// Validates chain declarations for correct hook types /// /// Checks for valid hooks in chain declarations: /// - input, output, forward, prerouting, postrouting /// - Reports unknown hooks as errors fn validate_chain_declarations_semantic(&self, source: &str) -> Vec { let mut diagnostics = Vec::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; let trimmed = line.trim(); if trimmed.starts_with("type ") && trimmed.contains("hook") { if let Some(hook_pos) = trimmed.find("hook") { let hook_part = &trimmed[hook_pos..]; let hook_words: Vec<&str> = hook_part.split_whitespace().collect(); if hook_words.len() >= 2 { let hook = hook_words[1]; match hook { "input" | "output" | "forward" | "prerouting" | "postrouting" => {} _ => { let start_col = line.find(hook).unwrap_or(0); let range = Range::new( Position::new(line_num, start_col as u32), Position::new(line_num, (start_col + hook.len()) as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::UnknownHook, format!("Unknown hook: '{}'", hook), ); diagnostics.push(diagnostic); } } } } } } diagnostics } /// Checks for best practices in nftables chain definitions /// /// Verifies that: /// - Filter chains have explicit policies defined fn validate_chain_best_practices(&self, source: &str) -> Vec { let mut diagnostics = Vec::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; let trimmed 
= line.trim(); if trimmed.contains("type filter") && !trimmed.contains("policy") { let range = Range::new( Position::new(line_num, 0), Position::new(line_num, line.len() as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::ChainWithoutPolicy, "Filter chain should have an explicit policy".to_string(), ); diagnostics.push(diagnostic); } } diagnostics } /// Analyzes nftables configuration for performance optimizations /// /// Checks for: /// - Large sets without timeouts /// - Rules without counters for monitoring fn check_performance_hints(&self, source: &str) -> Vec { let mut diagnostics = Vec::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; let trimmed = line.trim(); if trimmed.contains("set ") && trimmed.contains("{") && !trimmed.contains("timeout") { if trimmed.len() > 100 { let range = Range::new( Position::new(line_num, 0), Position::new(line_num, line.len() as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Hint, DiagnosticCode::LargeSetWithoutTimeout, "Consider adding a timeout to large sets for better performance" .to_string(), ); diagnostics.push(diagnostic); } } // Check for missing counters (performance hint) if (trimmed.contains(" accept") || trimmed.contains(" drop")) && !trimmed.contains("counter") { let range = Range::new( Position::new(line_num, 0), Position::new(line_num, line.len() as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Hint, DiagnosticCode::MissingCounters, "Consider adding counters to rules for monitoring and debugging".to_string(), ); diagnostics.push(diagnostic); } } diagnostics } /// Checks for security risks in nftables configuration /// /// Identifies: /// - Overly permissive rules that accept traffic from anywhere (0.0.0.0/0 or ::/0) fn check_security_warnings(&self, source: &str) -> Vec { let mut diagnostics = Vec::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx 
as u32; let trimmed = line.trim(); // Check for overly permissive rules (security warning) if trimmed.contains(" accept") && (trimmed.contains("0.0.0.0/0") || trimmed.contains("::/0")) { let range = Range::new( Position::new(line_num, 0), Position::new(line_num, line.len() as u32), ); let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Warning, DiagnosticCode::SecurityRisk, "Rule accepts traffic from anywhere - consider restricting source addresses" .to_string(), ); diagnostics.push(diagnostic); } } diagnostics } /// Validates CIDR notation in nftables configuration /// /// Checks for: /// - Valid IP address format (both IPv4 and IPv6) /// - Valid prefix length (0-32 for IPv4, 0-128 for IPv6) /// - Valid numeric prefix format fn validate_cidr_notation(&self, source: &str) -> Vec { let mut diagnostics = Vec::new(); for (line_idx, line) in source.lines().enumerate() { let line_num = line_idx as u32; // Look for potential CIDR notation patterns let words: Vec<&str> = line.split_whitespace().collect(); for word in words { if word.contains('/') && word.chars().any(|c| c.is_ascii_digit()) { if let Some(slash_pos) = word.find('/') { let (ip_part, prefix_part) = word.split_at(slash_pos); let prefix_part = &prefix_part[1..]; match ip_part.parse::() { Ok(ip_addr) => match prefix_part.parse::() { Ok(prefix) => { let max_prefix = match ip_addr { IpAddr::V4(_) => 32, IpAddr::V6(_) => 128, }; if prefix > max_prefix { if let Some(start_col) = line.find(word) { let range = Range::new( Position::new(line_num, start_col as u32), Position::new( line_num, (start_col + word.len()) as u32, ), ); let ip_type = match ip_addr { IpAddr::V4(_) => "IPv4", IpAddr::V6(_) => "IPv6", }; let diagnostic = Diagnostic::new( range, DiagnosticSeverity::Error, DiagnosticCode::InvalidCidrNotation, format!( "Invalid CIDR prefix length: '{}' (max {} for {})", prefix, max_prefix, ip_type ), ); diagnostics.push(diagnostic); } } } Err(_) => { if !prefix_part.is_empty() { if let Some(start_col) = 
line.find(word) {
                                    // Highlight exactly the offending word within the line.
                                    let range = Range::new(
                                        Position::new(line_num, start_col as u32),
                                        Position::new(line_num, (start_col + word.len()) as u32),
                                    );
                                    let diagnostic = Diagnostic::new(
                                        range,
                                        DiagnosticSeverity::Error,
                                        DiagnosticCode::InvalidCidrNotation,
                                        format!("Invalid CIDR prefix: '{}' must be a number", prefix_part),
                                    );
                                    diagnostics.push(diagnostic);
                                }
                            }
                        }
                    },
                    Err(_) => {
                        // The address part failed to parse as an `IpAddr`. Only report it
                        // when it plausibly *looks* like an IP literal ('.' or ':' plus
                        // digit/hex-digit characters), so arbitrary "a/b" tokens that merely
                        // contain a slash are not flagged as broken CIDR notation.
                        if (ip_part.contains('.') && ip_part.chars().any(|c| c.is_ascii_digit()))
                            || (ip_part.contains(':')
                                && ip_part
                                    .chars()
                                    .any(|c| c.is_ascii_digit() || c.is_ascii_hexdigit()))
                        {
                            if let Some(start_col) = line.find(word) {
                                let range = Range::new(
                                    Position::new(line_num, start_col as u32),
                                    Position::new(line_num, (start_col + word.len()) as u32),
                                );
                                let diagnostic = Diagnostic::new(
                                    range,
                                    DiagnosticSeverity::Error,
                                    DiagnosticCode::InvalidCidrNotation,
                                    format!("Invalid IP address in CIDR notation: '{}'", ip_part),
                                );
                                diagnostics.push(diagnostic);
                            }
                        }
                    }
                }
            }
        }
        }
        }
        diagnostics
    }

    /// Identifies redundant rules in nftables configuration
    ///
    /// Detects:
    /// - Duplicate accept/drop/reject rules
    ///
    /// # Parameters
    /// * `source` - Full configuration text to scan
    ///
    /// # Returns
    /// One `RedundantRule` warning per line whose trimmed text exactly repeats
    /// an earlier verdict-carrying rule line.
    fn check_for_redundant_rules(&self, source: &str) -> Vec<Diagnostic> {
        let mut diagnostics = Vec::new();
        // Trimmed text of every verdict rule seen so far; keys are owned so
        // they outlive the borrowed `line`.
        let mut seen_rules = HashSet::new();
        for (line_idx, line) in source.lines().enumerate() {
            let line_num = line_idx as u32;
            let trimmed = line.trim();
            // Only lines carrying a verdict (accept/drop/reject) are
            // candidates for duplicate detection.
            if trimmed.contains(" accept")
                || trimmed.contains(" drop")
                || trimmed.contains(" reject")
            {
                if seen_rules.contains(trimmed) {
                    // Flag the whole physical line as a duplicate.
                    let range = Range::new(
                        Position::new(line_num, 0),
                        Position::new(line_num, line.len() as u32),
                    );
                    let diagnostic = Diagnostic::new(
                        range,
                        DiagnosticSeverity::Warning,
                        DiagnosticCode::RedundantRule,
                        "Duplicate rule found".to_string(),
                    );
                    diagnostics.push(diagnostic);
                } else {
                    seen_rules.insert(trimmed.to_string());
                }
            }
        }
        diagnostics
    }
}

/// Main diagnostic analyzer
///
/// Wraps a `DiagnosticConfig` and orchestrates the individual analysis
/// modules (lexical, syntax, style, semantic) over a source file.
pub struct DiagnosticAnalyzer {
    // Settings shared with every analysis module.
    config: DiagnosticConfig,
}

impl DiagnosticAnalyzer {
    /// Creates a new diagnostic analyzer with the specified configuration
    ///
    /// # Parameters
    /// * `config` - Configuration settings for the analyzer
    pub fn new(config: DiagnosticConfig) -> Self {
        Self { config }
    }

    /// Analyzes source code with all standard analysis modules
    ///
    /// This is the main entry point for a complete analysis of nftables
    /// configurations. It runs lexical, syntax, style, and semantic analysis
    /// on the provided source.
    ///
    /// # Parameters
    /// * `source` - Source code to analyze
    /// * `file_path` - Path to the file being analyzed
    ///
    /// # Returns
    /// A collection of all diagnostics found in the source
    pub fn analyze(&self, source: &str, file_path: &str) -> DiagnosticCollection {
        self.analyze_with_modules(
            source,
            file_path,
            &["lexical", "syntax", "style", "semantic"],
        )
    }

    /// Analyzes source code with specific analysis modules
    ///
    /// Allows running only selected analysis modules for more targeted
    /// diagnostics.
    ///
    /// # Parameters
    /// * `source` - Source code to analyze
    /// * `file_path` - Path to the file being analyzed
    /// * `module_names` - Names of modules to run ("lexical", "syntax", "style", "semantic")
    ///
    /// # Returns
    /// A collection of diagnostics from the selected modules
    pub fn analyze_with_modules(
        &self,
        source: &str,
        file_path: &str,
        module_names: &[&str],
    ) -> DiagnosticCollection {
        let mut collection =
            DiagnosticCollection::new(file_path.to_string(), source.to_string());
        // NOTE(review): the trait-object type was garbled in the original
        // (`Vec>`); `AnalysisModule` is reconstructed from usage (`name()` /
        // `analyze()`) — confirm it matches the trait's actual name in this file.
        let modules: Vec<Box<dyn AnalysisModule>> = vec![
            Box::new(LexicalAnalyzer),
            Box::new(SyntaxAnalyzer),
            Box::new(StyleAnalyzer),
            Box::new(SemanticAnalyzer),
        ];
        // Run only the requested modules, in the fixed order declared above.
        for module in modules {
            if module_names.contains(&module.name()) {
                let diagnostics = module.analyze(source, &self.config);
                collection.extend(diagnostics);
            }
        }
        collection
    }
}

impl Default for DiagnosticAnalyzer {
    /// Analyzer using the default `DiagnosticConfig`.
    fn default() -> Self {
        Self::new(DiagnosticConfig::default())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_diagnostic_creation() {
        let range = Range::new(Position::new(0, 0), Position::new(0, 10));
        let diagnostic = Diagnostic::new(
            range,
            DiagnosticSeverity::Error,
            DiagnosticCode::SyntaxError,
            "Test error".to_string(),
        );
        assert_eq!(diagnostic.severity, DiagnosticSeverity::Error);
        assert_eq!(diagnostic.code, DiagnosticCode::SyntaxError);
        assert_eq!(diagnostic.message, "Test error");
    }

    #[test]
    fn test_position_from_text_size() {
        let source = "line 1\nline 2\nline 3";
        // Offset 8 lands one character into the second line (both 0-based).
        let pos = Position::from_text_size(TextSize::from(8), source);
        assert_eq!(pos.line, 1);
        assert_eq!(pos.character, 1);
    }

    #[test]
    fn test_style_analysis() {
        let analyzer = DiagnosticAnalyzer::default();
        let source = "table inet filter {\n chain input \n chain output\n}";
        let diagnostics = analyzer.analyze(source, "test.nft");
        // Should find missing shebang and trailing whitespace
        assert!(!diagnostics.diagnostics.is_empty());
        assert!(
            diagnostics
                .diagnostics
                .iter()
                .any(|d| d.code == DiagnosticCode::MissingShebang)
        );
        assert!(
            diagnostics
                .diagnostics
                .iter()
                .any(|d| d.code == DiagnosticCode::TrailingWhitespace)
        );
    }

    #[test]
    fn test_cidr_validation_ipv4_valid() {
        let analyzer = SemanticAnalyzer;
        let source = "ip saddr 192.168.1.0/24 accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify valid IPv4 CIDR notation doesn't produce errors
        assert!(
            !diagnostics
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
    }

    #[test]
    fn test_cidr_validation_ipv4_invalid_prefix() {
        let analyzer = SemanticAnalyzer;
        let source = "ip saddr 192.168.1.0/33 accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify detection of IPv4 prefix exceeding max (32)
        assert!(diagnostics.iter().any(|d| {
            d.code == DiagnosticCode::InvalidCidrNotation
                && d.message
                    .contains("Invalid CIDR prefix length: '33' (max 32 for IPv4)")
        }));
    }

    #[test]
    fn test_cidr_validation_ipv6_valid() {
        let analyzer = SemanticAnalyzer;
        let source = "ip6 saddr 2001:db8::/32 accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify valid IPv6 CIDR notation doesn't produce errors
        assert!(
            !diagnostics
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
    }

    #[test]
    fn test_cidr_validation_ipv6_invalid_prefix() {
        let analyzer = SemanticAnalyzer;
        let source = "ip6 saddr 2001:db8::/129 accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify detection of IPv6 prefix exceeding max (128)
        assert!(diagnostics.iter().any(|d| {
            d.code == DiagnosticCode::InvalidCidrNotation
                && d.message
                    .contains("Invalid CIDR prefix length: '129' (max 128 for IPv6)")
        }));
    }

    #[test]
    fn test_cidr_validation_invalid_ip_address() {
        let analyzer = SemanticAnalyzer;
        let source = "ip saddr 192.168.1.256/24 accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify detection of invalid IP address format
        assert!(diagnostics.iter().any(|d| {
            d.code == DiagnosticCode::InvalidCidrNotation
                && d.message
                    .contains("Invalid IP address in CIDR notation: '192.168.1.256'")
        }));
    }

    #[test]
    fn test_cidr_validation_invalid_prefix_format() {
        let analyzer = SemanticAnalyzer;
        let source = "ip saddr 192.168.1.0/abc accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify detection of non-numeric CIDR prefix
        assert!(diagnostics.iter().any(|d| {
            d.code == DiagnosticCode::InvalidCidrNotation
                && d.message
                    .contains("Invalid CIDR prefix: 'abc' must be a number")
        }));
    }

    #[test]
    fn test_cidr_validation_ipv6_compressed_notation() {
        let analyzer = SemanticAnalyzer;
        let source = "ip6 saddr ::1/128 accept";
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify compressed IPv6 notation is properly handled
        assert!(
            !diagnostics
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
    }

    #[test]
    fn test_cidr_validation_multiple_cidrs() {
        let analyzer = SemanticAnalyzer;
        let source = r#"
            ip saddr 192.168.1.0/24 accept
            ip6 saddr 2001:db8::/32 accept
            ip saddr 10.0.0.0/8 drop
            ip6 saddr fe80::/64 accept
        "#;
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify multiple valid CIDRs are properly parsed
        assert!(
            !diagnostics
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
    }

    #[test]
    fn test_cidr_validation_mixed_valid_invalid() {
        let analyzer = SemanticAnalyzer;
        let source = r#"
            ip saddr 192.168.1.0/24 accept
            ip saddr 192.168.1.0/33 drop
            ip6 saddr 2001:db8::/32 accept
            ip6 saddr 2001:db8::/129 drop
        "#;
        let diagnostics = analyzer.validate_cidr_notation(source);
        // Verify detection of specific invalid prefixes in a mixed content
        let cidr_errors: Vec<_> = diagnostics
            .iter()
            .filter(|d| d.code == DiagnosticCode::InvalidCidrNotation)
            .collect();
        assert_eq!(cidr_errors.len(), 2);
        // Check for IPv4 error
        assert!(
            cidr_errors
                .iter()
                .any(|d| d.message.contains("33") && d.message.contains("IPv4"))
        );
        // Check for IPv6 error
        assert!(
            cidr_errors
                .iter()
                .any(|d| d.message.contains("129") && d.message.contains("IPv6"))
        );
    }

    #[test]
    fn test_cidr_validation_edge_cases() {
        let analyzer = SemanticAnalyzer;
        // Test edge case: maximum allowed prefix lengths
        let source_ipv4_max = "ip saddr 192.168.1.0/32 accept";
        let diagnostics_ipv4 = analyzer.validate_cidr_notation(source_ipv4_max);
        assert!(
            !diagnostics_ipv4
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
        let source_ipv6_max = "ip6 saddr 2001:db8::/128 accept";
        let diagnostics_ipv6 = analyzer.validate_cidr_notation(source_ipv6_max);
        assert!(
            !diagnostics_ipv6
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
        // Test edge case: minimum prefix (catch-all address)
        let source_zero = "ip saddr 0.0.0.0/0 accept";
        let diagnostics_zero = analyzer.validate_cidr_notation(source_zero);
        assert!(
            !diagnostics_zero
                .iter()
                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
        );
    }
}