diff --git a/.config/nextest.toml b/.config/nextest.toml
deleted file mode 100644
index 59fe6d4..0000000
--- a/.config/nextest.toml
+++ /dev/null
@@ -1,28 +0,0 @@
-[test-groups]
-holochain-process = { max-threads = 1 }
-
-[profile.default]
-
-# "retries" defines the number of times a test should be retried. If set to a
-# non-zero value, tests that succeed on a subsequent attempt will be marked as
-# flaky. Can be overridden through the `--retries` option.
-retries = 2
-
-# This will display all of fail, retry, slow
-# see https://nexte.st/book/other-options.html?highlight=failure-output#--status-level-and---final-status-level
-status-level = "skip"
-
-# Treat a test that takes longer than this period as slow, and print a message.
-# Given a non-zero positive integer, shutdown the tests when the number periods
-# have passed.
-slow-timeout = { period = "30s", terminate-after = 4 }
-
-# * "immediate-final": output failures as soon as they happen and at the end of
-#   the test run
-failure-output = "immediate-final"
-
-# Do not cancel the test run on the first failure.
-fail-fast = false
-
-test-threads = 2
-
diff --git a/README.md b/README.md
index bb49636..66f23f0 100644
--- a/README.md
+++ b/README.md
@@ -1,33 +1,27 @@
 # nff
 
-This is a high performance, low overhead configuration parser for nftables,
-written in Rust. Syntax-aware parsing allows nff to provide a complete formatter
-_and_ a linter for nftables. With the formatter the goal is to receive possibly
-jumbled up nftables rule files, and output ✨ pretty ✨ human readable output in
-return. The linter on another hand, will demonstrate possible syntax,
-performance or stylistic errors.
-
-The main emphasis, however, is on the syntax-aware formatting with comprehensive
-grammar support.
+This is a high performance configuration parser for nftables, written in Rust.
+The goal is to receive possibly jumbled up nftables rule files, and output
+✨ pretty ✨ human readable output in return. The main emphasis is on
+syntax-aware formatting with comprehensive grammar support. A future goal is to
+let editors hook into nff to format your rulesets directly from your editor, or
+to use nff as a diagnostics source.
 
 ## Features
 
-> [!NOTE]
-> nff is in its early stages of development. While _most_ of the syntax is
-> supported, I cannot guarantee that _everything_ is supported just yet.
+nff is in its early stages of development. While _most_ of the syntax is
+supported, I cannot guarantee that _everything_ is supported just yet.
 
 ### Core Functionality
 
-Basic functionality of nff that most users will be interested in
-
 - **Syntax-aware formatting** - Deep understanding of nftables grammar with
   semantic preservation
-- **Multi-family support** - Handles `inet`, `ip`, `ip6`, `arp`, `bridge`, and
-  `netdev` table families
-- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
-- **Chain properties** - Hooks, priorities (including negative), policies,
-  device bindings
+- **Multi-family support** - Handles `inet`, `ip`, `ip6`, `arp`, `bridge`, and
+  `netdev` table families
 - **Flexible indentation** - Configurable tabs/spaces with custom depth
+- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
+- **Chain properties** - Hooks, priorities (including negative), policies,
+  device bindings
 
 ### Advanced Features
 
@@ -151,26 +145,16 @@ graph TD
 
 ## Installation
 
-Recommended way of installing nff is to use Nix. Add `nff` to your flake inputs,
-and add the package to your `environment.systemPackages`. Alternatively, on
-non-NixOS systems, it is possible to use `nix profile install` to install nff.
+The recommended way to install nff is to use Nix.
 
 ### Editor Integration
 
-> [!TIP]
-> Your editor not here? Open an issue. I can only add support for editors I use
-> but pull requests documenting alternative editor setups are appreciated!
-
 #### Neovim Setup
 
 nff can be integrated into Neovim as a diagnostics source for nftables files.
 Here are several setup approaches:
 
-##### Option 1: Using none-ls
-
-none-ls is the most common method of adding diagnostics sources in Neovim. While
-I recommend using nvim-lint for its simplicity, below instructions document how
-to set up null-ls.
+##### Option 1: Using none-ls
 
 ```lua
 local null_ls = require("null-ls")
@@ -198,9 +182,6 @@ null_ls.setup({
 
 ##### Option 2: Using nvim-lint (recommended)
 
-Recommended way of adding nff as a diagnostics source in Neovim. Simple, low
-overhead and not as error-prone as null-ls.
-
 ```lua
 -- ~/.config/nvim/lua/config/lint.lua
 require('lint').linters.nff = {
@@ -243,8 +224,7 @@ vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
 
 ##### Option 3: Custom Lua Function
 
-Alternatively, if you don't want to use plugins, consider a setup such as this
-one to do it without any reliance on plugins:
+For a simple custom solution:
 
 ```lua
 -- ~/.config/nvim/lua/nff.lua
@@ -584,11 +564,6 @@ style.nft:1:16: warning: Missing space after '{' [NFT503]
 
 ## Contributing
 
-### Building
-
-Build with `cargo build` as usual. If you are using Nix, you will also want to
-ensure that the Nix package builds as expected.
-
 ### Code Style
 
 - Follow `cargo fmt` formatting
@@ -603,6 +578,11 @@ ensure that the Nix package builds as expected.
 - **Regression tests**: Known issue prevention
 - **Performance tests**: Benchmark critical paths
 
+### Building
+
+Build with `cargo build` as usual. If you are using Nix, you will also want to
+ensure that the Nix package builds as expected.
+
 ## Technical Notes
 
 ### CST Implementation
diff --git a/src/cst.rs b/src/cst.rs
index 835e23e..41ac715 100644
--- a/src/cst.rs
+++ b/src/cst.rs
@@ -150,7 +150,7 @@ pub enum SyntaxKind {
     // Error recovery
     Error = 106,
 
-    // Protocol keywords for nftables
+    // Additional protocol keywords
    VmapKw = 107,
    NdRouterAdvertKw = 108,
    NdNeighborSolicitKw = 109,
@@ -237,7 +237,7 @@ impl From for SyntaxKind {
             New => SyntaxKind::NewKw,
             Invalid => SyntaxKind::InvalidKw,
 
-            // Protocol keywords for ICMP/ICMPv6
+            // Additional protocol keywords
             Vmap => SyntaxKind::VmapKw,
             NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
             NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
@@ -358,7 +358,7 @@ pub enum CstError {
 /// Result type for CST operations
 pub type CstResult<T> = Result<T, CstError>;
 
-/// CST builder for nftables syntax
+/// Basic CST builder
 pub struct CstBuilder;
 
 impl CstBuilder {
diff --git a/src/diagnostic.rs b/src/diagnostic.rs
index 4d0c6c9..a3a46b0 100644
--- a/src/diagnostic.rs
+++ b/src/diagnostic.rs
@@ -10,9 +10,9 @@ use crate::lexer::LexError;
 use crate::parser::{ParseError, Parser};
 use serde::{Deserialize, Serialize};
+use serde_json;
 use std::collections::{HashMap, HashSet};
 use std::fmt;
-use std::net::IpAddr;
 use text_size::TextSize;
 
 /// Diagnostic severity levels following LSP specification
@@ -171,23 +171,10 @@ pub struct Position {
 }
 
 impl Position {
-    /// Creates a new position with line and character coordinates
-    ///
-    /// # Parameters
-    /// * `line` - Zero-based line number
-    /// * `character` - Zero-based character offset in the line
     pub fn new(line: u32, character: u32) -> Self {
         Self { line, character }
     }
 
-    /// Converts a text offset to line and character coordinates
-    ///
-    /// # Parameters
-    /// * `text_size` - Byte offset in the source code
-    /// * `source` - Complete source text to analyze for line breaks
-    ///
-    /// # Returns
-    /// A position with line and character coordinates corresponding to the text offset
     pub fn from_text_size(text_size: TextSize, source: &str) -> Self {
         let mut line = 0;
         let mut character = 0;
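The rest of `from_text_size` is cut off by the hunk boundary above. For readers following the diff, a minimal standalone sketch of the scan that the deleted doc comment describes — a plain walk over `source` counting newlines up to the offset. The function name and the `usize` stand-in for `TextSize` are assumptions, not the crate's exact code:

```rust
/// Hypothetical standalone version of the offset-to-position scan.
/// `offset` plays the role of `TextSize` above: a byte offset into `source`.
fn position_of(offset: usize, source: &str) -> (u32, u32) {
    let (mut line, mut character) = (0u32, 0u32);
    for (i, ch) in source.char_indices() {
        if i >= offset {
            break;
        }
        if ch == '\n' {
            // A new line starts after every '\n'.
            line += 1;
            character = 0;
        } else {
            character += 1;
        }
    }
    (line, character)
}
```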
@@ -217,21 +204,10 @@ pub struct Range {
 }
 
 impl Range {
-    /// Creates a new range with start and end positions
-    ///
-    /// # Parameters
-    /// * `start` - Starting position (line and character)
-    /// * `end` - Ending position (line and character)
     pub fn new(start: Position, end: Position) -> Self {
         Self { start, end }
     }
 
-    /// Creates a range that covers only a single position
-    ///
-    /// Useful for diagnostics that point to a specific location rather than a range
-    ///
-    /// # Parameters
-    /// * `position` - The position to create a single-point range for
     pub fn single_position(position: Position) -> Self {
         Self {
             start: position.clone(),
@@ -290,16 +266,6 @@ pub struct Diagnostic {
 }
 
 impl Diagnostic {
-    /// Creates a new diagnostic with essential information
-    ///
-    /// # Parameters
-    /// * `range` - The source code range the diagnostic applies to
-    /// * `severity` - The severity level of the diagnostic
-    /// * `code` - The diagnostic code indicating the type of issue
-    /// * `message` - A human-readable description of the diagnostic
-    ///
-    /// # Returns
-    /// A new diagnostic with default values for other fields
     pub fn new(
         range: Range,
         severity: DiagnosticSeverity,
@@ -328,11 +294,6 @@ pub struct DiagnosticCollection {
 }
 
 impl DiagnosticCollection {
-    /// Creates a new diagnostic collection for a file
-    ///
-    /// # Parameters
-    /// * `file_path` - Path to the file being analyzed
-    /// * `source_text` - The content of the file
     pub fn new(file_path: String, source_text: String) -> Self {
         Self {
             diagnostics: Vec::new(),
@@ -341,44 +302,26 @@ impl DiagnosticCollection {
         }
     }
 
-    /// Adds multiple diagnostics to the collection
-    ///
-    /// # Parameters
-    /// * `diagnostics` - Vector of diagnostics to add
     pub fn extend(&mut self, diagnostics: Vec<Diagnostic>) {
         self.diagnostics.extend(diagnostics);
     }
 
-    /// Returns an iterator over all error-level diagnostics in the collection
     pub fn errors(&self) -> impl Iterator<Item = &Diagnostic> {
         self.diagnostics
             .iter()
             .filter(|d| d.severity == DiagnosticSeverity::Error)
     }
 
-    /// Checks if the collection contains any error-level diagnostics
     pub fn has_errors(&self) -> bool {
         self.errors().count() > 0
     }
 
-    /// Converts the diagnostic collection to JSON format
-    ///
-    /// Useful for integrating with Language Server Protocol (LSP) clients
-    /// or other tools that consume structured diagnostic data.
-    ///
-    /// # Returns
-    /// A JSON string representation of the diagnostic collection, or an error if serialization fails
+    /// Convert to JSON for LSP or tooling integration
     pub fn to_json(&self) -> serde_json::Result<String> {
         serde_json::to_string_pretty(self)
     }
 
-    /// Converts the diagnostic collection to a human-readable text format
-    ///
-    /// Produces a formatted report suitable for display in a terminal,
-    /// with file locations, error codes, and code snippets for context.
-    ///
-    /// # Returns
-    /// A formatted string containing all diagnostics with relevant context
+    /// Convert to a human-readable format
     pub fn to_human_readable(&self) -> String {
         let mut output = String::new();
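Taken together, the surviving constructors compose as follows. This is a usage sketch based only on the signatures visible in this diff; the `nff::diagnostic` module path and the sample values are assumptions:

```rust
use nff::diagnostic::{
    Diagnostic, DiagnosticCode, DiagnosticCollection, DiagnosticSeverity, Position, Range,
};

fn report(source: &str) -> serde_json::Result<String> {
    let mut collection =
        DiagnosticCollection::new("firewall.nft".to_string(), source.to_string());

    // Flag a hypothetical bad CIDR on line 0, columns 9..25.
    let range = Range::new(Position::new(0, 9), Position::new(0, 25));
    collection.extend(vec![Diagnostic::new(
        range,
        DiagnosticSeverity::Error,
        DiagnosticCode::InvalidCidrNotation,
        "Invalid CIDR prefix length: '33' (max 32 for IPv4)".to_string(),
    )]);

    // `to_json` feeds LSP-style consumers; `to_human_readable` is the terminal report.
    collection.to_json()
}
```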
@@ -416,18 +359,6 @@
         output
     }
 
-    /// Extracts source code lines around a diagnostic location
-    ///
-    /// Provides context for a diagnostic by showing the lines of code surrounding
-    /// the issue, with line numbers and a marker pointing to the problematic line.
-    ///
-    /// # Parameters
-    /// * `range` - The range in the source code to provide context for
-    /// * `context_lines` - Number of lines to include before and after the range
-    ///
-    /// # Returns
-    /// A vector of formatted strings containing the context lines with line numbers,
-    /// or None if the range is invalid
     fn get_context_lines(&self, range: &Range, context_lines: usize) -> Option<Vec<String>> {
         let lines: Vec<&str> = self.source_text.lines().collect();
         let start_line = range.start.line as usize;
@@ -504,7 +435,9 @@ impl AnalyzerModule for LexicalAnalyzer {
         let mut lexer = NftablesLexer::new(source);
 
         match lexer.tokenize() {
-            Ok(_) => {}
+            Ok(_) => {
+                // No lexical errors
+            }
             Err(lex_error) => {
                 let diagnostic = Self::lex_error_to_diagnostic(&lex_error, source);
                 diagnostics.push(diagnostic);
@@ -520,17 +453,6 @@
 }
 
 impl LexicalAnalyzer {
-    /// Converts a lexical error to a diagnostic
-    ///
-    /// Translates lexer-specific errors into the generic diagnostic format
-    /// with appropriate severity, code, and location information.
-    ///
-    /// # Parameters
-    /// * `error` - The lexical error to convert
-    /// * `source` - Source code for position calculation
-    ///
-    /// # Returns
-    /// A diagnostic describing the lexical error
     pub fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
         match error {
             LexError::InvalidToken { position, text } => {
@@ -586,14 +508,18 @@ impl AnalyzerModule for SyntaxAnalyzer {
             Ok(tokens) => {
                 let mut parser = Parser::new(tokens);
                 match parser.parse() {
-                    Ok(_) => {}
+                    Ok(_) => {
+                        // No parse errors
+                    }
                     Err(parse_error) => {
                         let diagnostic = Self::parse_error_to_diagnostic(&parse_error, source);
                         diagnostics.push(diagnostic);
                     }
                 }
             }
-            Err(_) => {}
+            Err(_) => {
+                // Already handled in lexical analysis
+            }
         }
 
         diagnostics
@@ -605,17 +531,6 @@
 }
 
 impl SyntaxAnalyzer {
-    /// Converts a parse error to a diagnostic
-    ///
-    /// Translates parser-specific errors into the generic diagnostic format
-    /// with appropriate severity, code, and meaningful error messages.
-    ///
-    /// # Parameters
-    /// * `error` - The parse error to convert
-    /// * `_source` - Source code for position calculation
-    ///
-    /// # Returns
-    /// A diagnostic describing the syntax error
     fn parse_error_to_diagnostic(error: &ParseError, _source: &str) -> Diagnostic {
         match error {
             ParseError::UnexpectedToken {
@@ -692,9 +607,12 @@ impl AnalyzerModule for StyleAnalyzer {
     fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
+        // Only perform style analysis if enabled
         if !config.enable_style_warnings {
             return diagnostics;
         }
+
+        // Check for missing shebang
         if !source.starts_with("#!") {
             let range = Range::new(Position::new(0, 0), Position::new(0, 0));
             let diagnostic = Diagnostic::new(
@@ -719,11 +637,6 @@
 }
 
 impl StyleAnalyzer {
-    /// Analyzes line issues in nftables configuration
-    ///
-    /// Detects:
-    /// - Lines exceeding maximum length
-    /// - Trailing whitespace at end of lines
     fn analyze_line_issues(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
@@ -767,11 +680,6 @@
         diagnostics
     }
 
-    /// Analyzes whitespace issues in nftables configuration
-    ///
-    /// Detects:
-    /// - Too many consecutive empty lines
-    /// - Trailing empty lines at end of file
     fn analyze_whitespace_issues(
         &self,
         source: &str,
@@ -828,12 +736,6 @@
         diagnostics
     }
 
-    /// Analyzes indentation consistency in nftables configuration
-    ///
-    /// Checks for:
-    /// - Mixed tabs and spaces in a single line
-    /// - Inconsistent indentation styles across the file
-    /// - Adherence to preferred indentation style if specified
     fn analyze_indentation(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
         let mut has_tabs = false;
@@ -908,19 +810,23 @@ impl AnalyzerModule for SemanticAnalyzer {
     fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
+        // Always run semantic validation (syntax/semantic errors)
         diagnostics.extend(self.validate_table_declarations(source));
         diagnostics.extend(self.validate_chain_declarations_semantic(source));
         diagnostics.extend(self.validate_cidr_notation(source));
 
+        // Best practices checks (only if enabled)
         if config.enable_best_practices {
             diagnostics.extend(self.validate_chain_best_practices(source));
             diagnostics.extend(self.check_for_redundant_rules(source));
         }
 
+        // Performance hints (only if enabled)
         if config.enable_performance_hints {
             diagnostics.extend(self.check_performance_hints(source));
         }
 
+        // Security warnings (only if enabled)
         if config.enable_security_warnings {
             diagnostics.extend(self.check_security_warnings(source));
         }
@@ -934,11 +840,6 @@ impl AnalyzerModule for SemanticAnalyzer {
 }
 
 impl SemanticAnalyzer {
-    /// Validates table declarations in nftables configuration
-    ///
-    /// Checks for:
-    /// - Valid table family (ip, ip6, inet, arp, bridge, netdev)
-    /// - Duplicate table names
     fn validate_table_declarations(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
         let mut seen_tables = HashSet::new();
@@ -953,8 +854,11 @@
                 let family = parts[1];
                 let name = parts[2];
 
+                // Validate family
                 match family {
-                    "ip" | "ip6" | "inet" | "arp" | "bridge" | "netdev" => {}
+                    "ip" | "ip6" | "inet" | "arp" | "bridge" | "netdev" => {
+                        // Valid family
+                    }
                     _ => {
                         let start_col = line.find(family).unwrap_or(0);
                         let range = Range::new(
@@ -996,11 +900,6 @@
         diagnostics
     }
 
-    /// Validates chain declarations for correct hook types
-    ///
-    /// Checks for valid hooks in chain declarations:
-    /// - input, output, forward, prerouting, postrouting
-    /// - Reports unknown hooks as errors
     fn validate_chain_declarations_semantic(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1009,6 +908,7 @@
             let trimmed = line.trim();
 
             if trimmed.starts_with("type ") && trimmed.contains("hook") {
+                // Validate chain type and hook (semantic validation)
                 if let Some(hook_pos) = trimmed.find("hook") {
                     let hook_part = &trimmed[hook_pos..];
                     let hook_words: Vec<&str> = hook_part.split_whitespace().collect();
@@ -1016,7 +916,9 @@
                     if hook_words.len() >= 2 {
                         let hook = hook_words[1];
                         match hook {
-                            "input" | "output" | "forward" | "prerouting" | "postrouting" => {}
+                            "input" | "output" | "forward" | "prerouting" | "postrouting" => {
+                                // Valid hook
+                            }
                             _ => {
                                 let start_col = line.find(hook).unwrap_or(0);
                                 let range = Range::new(
@@ -1040,10 +942,6 @@
         diagnostics
     }
 
-    /// Checks for best practices in nftables chain definitions
-    ///
-    /// Verifies that:
-    /// - Filter chains have explicit policies defined
     fn validate_chain_best_practices(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1051,6 +949,7 @@
             let line_num = line_idx as u32;
             let trimmed = line.trim();
 
+            // Check for missing policy in filter chains (best practice)
             if trimmed.contains("type filter") && !trimmed.contains("policy") {
                 let range = Range::new(
                     Position::new(line_num, 0),
@@ -1069,11 +968,6 @@
         diagnostics
     }
 
-    /// Analyzes nftables configuration for performance optimizations
-    ///
-    /// Checks for:
-    /// - Large sets without timeouts
-    /// - Rules without counters for monitoring
     fn check_performance_hints(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1081,7 +975,9 @@
             let line_num = line_idx as u32;
             let trimmed = line.trim();
 
+            // Check for large sets without timeout (performance hint)
             if trimmed.contains("set ") && trimmed.contains("{") && !trimmed.contains("timeout") {
+                // Simple heuristic: if set definition is long, suggest timeout
                 if trimmed.len() > 100 {
                     let range = Range::new(
                         Position::new(line_num, 0),
@@ -1119,10 +1015,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Checks for security risks in nftables configuration
-    ///
-    /// Identifies:
-    /// - Overly permissive rules that accept traffic from anywhere (0.0.0.0/0 or ::/0)
     fn check_security_warnings(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
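The body of `check_security_warnings` falls between hunks and is not shown. Going only by the doc comment deleted above (flagging rules that accept from `0.0.0.0/0` or `::/0`), a rough line-scanning sketch in the style of the neighbouring analyzers might look like this — a guess at the shape, not the actual implementation:

```rust
// Hypothetical sketch: collect (line, message) pairs for catch-all accepts.
fn overly_permissive_lines(source: &str) -> Vec<(u32, String)> {
    let mut findings = Vec::new();
    for (line_idx, line) in source.lines().enumerate() {
        let trimmed = line.trim();
        // The removed doc comment names both the IPv4 and IPv6 catch-all forms.
        let catch_all = trimmed.contains("0.0.0.0/0") || trimmed.contains("::/0");
        if catch_all && trimmed.contains("accept") {
            findings.push((
                line_idx as u32,
                format!("Rule accepts traffic from any address: '{}'", trimmed),
            ));
        }
    }
    findings
}
```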
@@ -1152,62 +1044,35 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Validates CIDR notation in nftables configuration
-    ///
-    /// Checks for:
-    /// - Valid IP address format (both IPv4 and IPv6)
-    /// - Valid prefix length (0-32 for IPv4, 0-128 for IPv6)
-    /// - Valid numeric prefix format
     fn validate_cidr_notation(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
         for (line_idx, line) in source.lines().enumerate() {
             let line_num = line_idx as u32;
 
-            // Look for potential CIDR notation patterns
+            // Simple CIDR validation without regex dependency
             let words: Vec<&str> = line.split_whitespace().collect();
 
             for word in words {
                 if word.contains('/') && word.chars().any(|c| c.is_ascii_digit()) {
                     if let Some(slash_pos) = word.find('/') {
                         let (ip_part, prefix_part) = word.split_at(slash_pos);
-                        let prefix_part = &prefix_part[1..];
+                        let prefix_part = &prefix_part[1..]; // Remove the '/'
 
-                        match ip_part.parse::<IpAddr>() {
-                            Ok(ip_addr) => match prefix_part.parse::<u8>() {
-                                Ok(prefix) => {
-                                    let max_prefix = match ip_addr {
-                                        IpAddr::V4(_) => 32,
-                                        IpAddr::V6(_) => 128,
-                                    };
-
-                                    if prefix > max_prefix {
-                                        if let Some(start_col) = line.find(word) {
-                                            let range = Range::new(
-                                                Position::new(line_num, start_col as u32),
-                                                Position::new(
-                                                    line_num,
-                                                    (start_col + word.len()) as u32,
-                                                ),
-                                            );
-                                            let ip_type = match ip_addr {
-                                                IpAddr::V4(_) => "IPv4",
-                                                IpAddr::V6(_) => "IPv6",
-                                            };
-                                            let diagnostic = Diagnostic::new(
-                                                range,
-                                                DiagnosticSeverity::Error,
-                                                DiagnosticCode::InvalidCidrNotation,
-                                                format!(
-                                                    "Invalid CIDR prefix length: '{}' (max {} for {})",
-                                                    prefix, max_prefix, ip_type
-                                                ),
-                                            );
-                                            diagnostics.push(diagnostic);
-                                        }
-                                    }
+                        // Basic IPv4 validation
+                        let ip_parts: Vec<&str> = ip_part.split('.').collect();
+                        if ip_parts.len() == 4 {
+                            let mut valid_ip = true;
+                            for part in ip_parts {
+                                if part.parse::<u8>().is_err() {
+                                    valid_ip = false;
+                                    break;
                                 }
-                                Err(_) => {
-                                    if !prefix_part.is_empty() {
+                            }
+
+                            // Validate prefix length
+                            if valid_ip {
+                                if let Ok(prefix) = prefix_part.parse::<u8>() {
+                                    if prefix > 32 {
                                         if let Some(start_col) = line.find(word) {
                                             let range = Range::new(
                                                 Position::new(line_num, start_col as u32),
@@ -1221,23 +1086,14 @@ impl SemanticAnalyzer {
                                                 DiagnosticSeverity::Error,
                                                 DiagnosticCode::InvalidCidrNotation,
                                                 format!(
-                                                    "Invalid CIDR prefix: '{}' must be a number",
-                                                    prefix_part
+                                                    "Invalid CIDR prefix length: '{}' (max 32 for IPv4)",
+                                                    prefix
                                                 ),
                                             );
                                             diagnostics.push(diagnostic);
                                         }
                                     }
-                                }
-                            },
-                            Err(_) => {
-                                if (ip_part.contains('.')
-                                    && ip_part.chars().any(|c| c.is_ascii_digit()))
-                                    || (ip_part.contains(':')
-                                        && ip_part
-                                            .chars()
-                                            .any(|c| c.is_ascii_digit() || c.is_ascii_hexdigit()))
-                                {
+                                } else if !prefix_part.is_empty() {
                                     if let Some(start_col) = line.find(word) {
                                         let range = Range::new(
                                             Position::new(line_num, start_col as u32),
@@ -1250,10 +1106,7 @@ impl SemanticAnalyzer {
                                             range,
                                             DiagnosticSeverity::Error,
                                             DiagnosticCode::InvalidCidrNotation,
-                                            format!(
-                                                "Invalid IP address in CIDR notation: '{}'",
-                                                ip_part
-                                            ),
+                                            format!("Invalid CIDR notation: '{}'", word),
                                         );
                                         diagnostics.push(diagnostic);
                                     }
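Worth noting what this rewrite trades away: the deleted code parsed the address with `std::net::IpAddr` (hence the dropped import at the top of this file) and validated both IPv4 and IPv6 prefixes, while the replacement dotted-quad check only covers IPv4 — the IPv6 tests are deleted further down. For comparison, the family-aware check fits in a few std-only lines. This sketch mirrors the removed logic but is not a drop-in replacement; it silently skips invalid addresses rather than reporting them:

```rust
use std::net::IpAddr;

/// Returns an error message if `word` is CIDR notation with a bad prefix,
/// or None otherwise.
fn cidr_error(word: &str) -> Option<String> {
    let (ip_part, prefix_part) = word.split_once('/')?;
    let ip: IpAddr = ip_part.parse().ok()?; // invalid addresses are skipped here
    let max: u32 = if ip.is_ipv4() { 32 } else { 128 };
    match prefix_part.parse::<u8>() {
        Ok(p) if u32::from(p) <= max => None, // valid prefix
        Ok(p) => Some(format!("Invalid CIDR prefix length: '{}' (max {})", p, max)),
        Err(_) => Some(format!("Invalid CIDR prefix: '{}' must be a number", prefix_part)),
    }
}
```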
@@ -1268,10 +1121,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Identifies redundant rules in nftables configuration
-    ///
-    /// Detects:
-    /// - Duplicate accept/drop/reject rules
     fn check_for_redundant_rules(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
         let mut seen_rules = HashSet::new();
@@ -1280,10 +1129,12 @@ impl SemanticAnalyzer {
             let line_num = line_idx as u32;
             let trimmed = line.trim();
 
+            // Simple rule detection (lines that contain actions)
             if trimmed.contains(" accept")
                 || trimmed.contains(" drop")
                 || trimmed.contains(" reject")
             {
+                // Check if rule already exists in the HashSet
                 if seen_rules.contains(trimmed) {
                     let range = Range::new(
                         Position::new(line_num, 0),
@@ -1297,6 +1148,7 @@ impl SemanticAnalyzer {
                     );
                     diagnostics.push(diagnostic);
                 } else {
+                    // Add the rule to the HashSet if it's not a duplicate
                     seen_rules.insert(trimmed.to_string());
                 }
             }
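The deduplication above is directly testable in the same style as the tests that survive below; a hypothetical example, not part of this patch:

```rust
#[test]
fn test_duplicate_rule_detection() {
    let analyzer = SemanticAnalyzer;
    // Identical trimmed lines: the second occurrence should be flagged.
    let source = "tcp dport 22 accept\ntcp dport 22 accept";
    let diagnostics = analyzer.check_for_redundant_rules(source);

    // The exact Diagnostic pushed for duplicates is elided between hunks,
    // so only assert that a single diagnostic was produced.
    assert_eq!(diagnostics.len(), 1);
}
```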
@@ -1312,25 +1164,11 @@ pub struct DiagnosticAnalyzer {
 }
 
 impl DiagnosticAnalyzer {
-    /// Creates a new diagnostic analyzer with the specified configuration
-    ///
-    /// # Parameters
-    /// * `config` - Configuration settings for the analyzer
     pub fn new(config: DiagnosticConfig) -> Self {
         Self { config }
     }
 
-    /// Analyzes source code with all standard analysis modules
-    ///
-    /// This is the main entry point for a complete analysis of nftables configurations.
-    /// It runs lexical, syntax, style, and semantic analysis on the provided source.
-    ///
-    /// # Parameters
-    /// * `source` - Source code to analyze
-    /// * `file_path` - Path to the file being analyzed
-    ///
-    /// # Returns
-    /// A collection of all diagnostics found in the source
+    /// Analyze source code with all available modules
     pub fn analyze(&self, source: &str, file_path: &str) -> DiagnosticCollection {
         self.analyze_with_modules(
             source,
@@ -1339,17 +1177,7 @@ impl DiagnosticAnalyzer {
         )
     }
 
-    /// Analyzes source code with specific analysis modules
-    ///
-    /// Allows running only selected analysis modules for more targeted diagnostics.
-    ///
-    /// # Parameters
-    /// * `source` - Source code to analyze
-    /// * `file_path` - Path to the file being analyzed
-    /// * `module_names` - Names of modules to run ("lexical", "syntax", "style", "semantic")
-    ///
-    /// # Returns
-    /// A collection of diagnostics from the selected modules
+    /// Analyze source code with specific modules
     pub fn analyze_with_modules(
         &self,
         source: &str,
         file_path: &str,
@@ -1430,193 +1258,4 @@ mod tests {
                 .any(|d| d.code == DiagnosticCode::TrailingWhitespace)
         );
     }
-
-    #[test]
-    fn test_cidr_validation_ipv4_valid() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.0/24 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify valid IPv4 CIDR notation doesn't produce errors
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv4_invalid_prefix() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.0/33 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of IPv4 prefix exceeding max (32)
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid CIDR prefix length: '33' (max 32 for IPv4)")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv6_valid() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip6 saddr 2001:db8::/32 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify valid IPv6 CIDR notation doesn't produce errors
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv6_invalid_prefix() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip6 saddr 2001:db8::/129 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of IPv6 prefix exceeding max (128)
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid CIDR prefix length: '129' (max 128 for IPv6)")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_invalid_ip_address() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.256/24 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of invalid IP address format
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid IP address in CIDR notation: '192.168.1.256'")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_invalid_prefix_format() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.0/abc accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of non-numeric CIDR prefix
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid CIDR prefix: 'abc' must be a number")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv6_compressed_notation() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip6 saddr ::1/128 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify compressed IPv6 notation is properly handled
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_multiple_cidrs() {
-        let analyzer = SemanticAnalyzer;
-        let source = r#"
-            ip saddr 192.168.1.0/24 accept
-            ip6 saddr 2001:db8::/32 accept
-            ip saddr 10.0.0.0/8 drop
-            ip6 saddr fe80::/64 accept
-        "#;
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify multiple valid CIDRs are properly parsed
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_mixed_valid_invalid() {
-        let analyzer = SemanticAnalyzer;
-        let source = r#"
-            ip saddr 192.168.1.0/24 accept
-            ip saddr 192.168.1.0/33 drop
-            ip6 saddr 2001:db8::/32 accept
-            ip6 saddr 2001:db8::/129 drop
-        "#;
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of specific invalid prefixes in a mixed content
-        let cidr_errors: Vec<_> = diagnostics
-            .iter()
-            .filter(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-            .collect();
-        assert_eq!(cidr_errors.len(), 2);
-
-        // Check for IPv4 error
-        assert!(
-            cidr_errors
-                .iter()
-                .any(|d| d.message.contains("33") && d.message.contains("IPv4"))
-        );
-        // Check for IPv6 error
-        assert!(
-            cidr_errors
-                .iter()
-                .any(|d| d.message.contains("129") && d.message.contains("IPv6"))
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_edge_cases() {
-        let analyzer = SemanticAnalyzer;
-
-        // Test edge case: maximum allowed prefix lengths
-        let source_ipv4_max = "ip saddr 192.168.1.0/32 accept";
-        let diagnostics_ipv4 = analyzer.validate_cidr_notation(source_ipv4_max);
-        assert!(
-            !diagnostics_ipv4
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-
-        let source_ipv6_max = "ip6 saddr 2001:db8::/128 accept";
-        let diagnostics_ipv6 = analyzer.validate_cidr_notation(source_ipv6_max);
-        assert!(
-            !diagnostics_ipv6
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-
-        // Test edge case: minimum prefix (catch-all address)
-        let source_zero = "ip saddr 0.0.0.0/0 accept";
-        let diagnostics_zero = analyzer.validate_cidr_notation(source_zero);
-        assert!(
-            !diagnostics_zero
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-
-        // Test edge case: IPv6 minimum prefix
-        let source_ipv6_zero = "ip6 saddr ::/0 accept";
-        let diagnostics_ipv6_zero = analyzer.validate_cidr_notation(source_ipv6_zero);
-        assert!(
-            !diagnostics_ipv6_zero
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
 }
diff --git a/src/main.rs b/src/main.rs
index ae1f85c..ea6be4f 100755
--- a/src/main.rs
+++ b/src/main.rs
@@ -506,6 +506,7 @@ fn convert_parse_error_to_formatter_error(
             suggestion: None,
         },
         ParseError::MissingToken { expected } => {
+            // Try to find current position from last token
             let (line, column) = if let Some(last_token) = tokens.last() {
                 position_from_range(&last_token.range, source)
             } else {
@@ -519,6 +520,7 @@ fn convert_parse_error_to_formatter_error(
             }
         }
         ParseError::InvalidExpression { message } => {
+            // Try to find the current token position
            let (line, column) = find_current_parse_position(tokens, source);
            FormatterError::SyntaxError {
                line,