Compare commits
No commits in common. "4d1e05662e8ae947d632b2ddb35e85b3cc0cf87b" and "13245c08fe4959cafa5920854911e404eb3e8481" have entirely different histories.
4d1e05662e...13245c08fe
5 changed files with 81 additions and 488 deletions
@@ -1,28 +0,0 @@
-[test-groups]
-holochain-process = { max-threads = 1 }
-
-[profile.default]
-
-# "retries" defines the number of times a test should be retried. If set to a
-# non-zero value, tests that succeed on a subsequent attempt will be marked as
-# flaky. Can be overridden through the `--retries` option.
-retries = 2
-
-# This will display all of fail, retry, slow
-# see https://nexte.st/book/other-options.html?highlight=failure-output#--status-level-and---final-status-level
-status-level = "skip"
-
-# Treat a test that takes longer than this period as slow, and print a message.
-# Given a non-zero positive integer, shutdown the tests when the number periods
-# have passed.
-slow-timeout = { period = "30s", terminate-after = 4 }
-
-# * "immediate-final": output failures as soon as they happen and at the end of
-#   the test run
-failure-output = "immediate-final"
-
-# Do not cancel the test run on the first failure.
-fail-fast = false
-
-test-threads = 2
-
README.md (62 changed lines)
@@ -1,33 +1,27 @@
 # nff
 
-This is a high performance, low overhead configuration parser for nftables,
-written in Rust. Syntax-aware parsing allows nff to provide a complete formatter
-_and_ a linter for nftables. With the formatter the goal is to receive possibly
-jumbled up nftables rule files, and output ✨ pretty ✨ human readable output in
-return. The linter on another hand, will demonstrate possible syntax,
-performance or stylistic errors.
-
-The main emphasis, however, is on the syntax-aware formatting with comprehensive
-grammar support.
+This is a high performance configuration parser and written in Rust. The goal is
+to receive possibly jumbled up nftables rule files, and output ✨ pretty ✨
+human readable output in return. The main emphasis is on the syntax-aware
+formatting with comprehensive grammar support. It is a future goal to allow
+editors to hook into nff in order to format your rulesets directly from your
+editor, or as a diagnostics source.
 
 ## Features
 
-> [!NOTE]
-> nff is in its early stages of development. While _most_ of the syntax is
-> supported, I cannot guarantee that _everything_ is supported just yet.
+nff is in its early stages of development. While _most_ of the syntax is
+supported, I cannot guarantee that _everything_ is supported just yet.
 
 ### Core Functionality
 
-Basic functionality of nff that most users will be interested in
-
 - **Syntax-aware formatting** - Deep understanding of nftables grammar with
   semantic preservation
 - **Multi-family support** - Handles `inet`, `ip`, `ip6`, `arp`, `bridge`, and
   `netdev` table families
-- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
-- **Chain properties** - Hooks, priorities (including negative), policies,
-  device bindings
 - **Flexible indentation** - Configurable tabs/spaces with custom depth
+- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
+- **Chain properties** - Hooks, priorities (including negative), policies,
+  device bindings
 
 ### Advanced Features
 
@@ -151,26 +145,16 @@ graph TD
 
 ## Installation
 
-Recommended way of installing nff is to use Nix. Add `nff` to your flake inputs,
-and add the package to your `environment.systemPackages`. Alternatively, on
-non-NixOS systems, it is possible to use `nix profile install` to install nff.
+Recommended way of installing nff is to use Nix.
 
 ### Editor Integration
 
-> [!TIP]
-> Your editor not here? Open an issue. I can only add support for editors I use
-> but pull requests documenting alternative editor setups are appreciated!
-
 #### Neovim Setup
 
 nff can be integrated into Neovim as a diagnostics source for nftables files.
 Here are several setup approaches:
 
-##### Option 1: Using none-ls
+##### Option 2: Using none-ls
 
-none-ls is the most common method of adding diagnostics sources in Neovim. While
-I recommend using nvim-lint for its simplicity, below instructions document how
-to set up null-ls.
-
 ```lua
 local null_ls = require("null-ls")
@@ -198,9 +182,6 @@ null_ls.setup({
 
 ##### Option 2: Using nvim-lint (recommended)
 
-Recommended way of adding nff as a diagnostics source in Neovim. Simple, low
-overhead and not as error-prone as null-ls.
-
 ```lua
 -- ~/.config/nvim/lua/config/lint.lua
 require('lint').linters.nff = {
@@ -243,8 +224,7 @@ vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
 
 ##### Option 3: Custom Lua Function
 
-Alternatively, if you don't want to use plugins, consider a setup such as this
-one to do it without any reliance on plugins:
+For a simple custom solution:
 
 ```lua
 -- ~/.config/nvim/lua/nff.lua
@@ -584,11 +564,6 @@ style.nft:1:16: warning: Missing space after '{' [NFT503]
 
 ## Contributing
 
-### Building
-
-Build with `cargo build` as usual. If you are using Nix, you will also want to
-ensure that the Nix package builds as expected.
-
 ### Code Style
 
 - Follow `cargo fmt` formatting
@@ -603,6 +578,11 @@ ensure that the Nix package builds as expected.
 - **Regression tests**: Known issue prevention
 - **Performance tests**: Benchmark critical paths
 
+### Building
+
+Build with `cargo build` as usual. If you are using Nix, you will also want to
+ensure that the Nix package builds as expected.
+
 ## Technical Notes
 
 ### CST Implementation
@@ -150,7 +150,7 @@ pub enum SyntaxKind {
     // Error recovery
     Error = 106,
 
-    // Protocol keywords for nftables
+    // Additional protocol keywords
    VmapKw = 107,
    NdRouterAdvertKw = 108,
    NdNeighborSolicitKw = 109,
@@ -237,7 +237,7 @@ impl From<TokenKind> for SyntaxKind {
             New => SyntaxKind::NewKw,
             Invalid => SyntaxKind::InvalidKw,
 
-            // Protocol keywords for ICMP/ICMPv6
+            // Additional protocol keywords
             Vmap => SyntaxKind::VmapKw,
             NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
             NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
@@ -358,7 +358,7 @@ pub enum CstError {
 /// Result type for CST operations
 pub type CstResult<T> = Result<T, CstError>;
 
-/// CST builder for nftables syntax
+/// Basic CST builder
 pub struct CstBuilder;
 
 impl CstBuilder {
@@ -10,9 +10,9 @@
 use crate::lexer::LexError;
 use crate::parser::{ParseError, Parser};
 use serde::{Deserialize, Serialize};
+use serde_json;
 use std::collections::{HashMap, HashSet};
 use std::fmt;
-use std::net::IpAddr;
 use text_size::TextSize;
 
 /// Diagnostic severity levels following LSP specification
@@ -171,23 +171,10 @@ pub struct Position {
 }
 
 impl Position {
-    /// Creates a new position with line and character coordinates
-    ///
-    /// # Parameters
-    /// * `line` - Zero-based line number
-    /// * `character` - Zero-based character offset in the line
     pub fn new(line: u32, character: u32) -> Self {
         Self { line, character }
     }
 
-    /// Converts a text offset to line and character coordinates
-    ///
-    /// # Parameters
-    /// * `text_size` - Byte offset in the source code
-    /// * `source` - Complete source text to analyze for line breaks
-    ///
-    /// # Returns
-    /// A position with line and character coordinates corresponding to the text offset
     pub fn from_text_size(text_size: TextSize, source: &str) -> Self {
         let mut line = 0;
         let mut character = 0;
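Both `new` and `from_text_size` survive this hunk unchanged; only their doc comments are dropped. For readers of the diff, the conversion `from_text_size` performs is an offset-to-line/column scan over the source. A minimal, self-contained sketch of that idea — using a plain `usize` offset instead of the crate's `TextSize`, and a hypothetical function name — could look like this:

```rust
/// Convert a byte offset into zero-based (line, character) coordinates.
/// Sketch only: the real `Position::from_text_size` takes a `text_size::TextSize`.
fn offset_to_position(offset: usize, source: &str) -> (u32, u32) {
    let mut line = 0u32;
    let mut character = 0u32;
    for (i, ch) in source.char_indices() {
        if i >= offset {
            break;
        }
        if ch == '\n' {
            line += 1; // a newline starts the next line
            character = 0; // and resets the column
        } else {
            character += 1;
        }
    }
    (line, character)
}

fn main() {
    let src = "table inet filter {\n  chain input {\n";
    assert_eq!(offset_to_position(0, src), (0, 0));
    assert_eq!(offset_to_position(22, src), (1, 2)); // two characters into line 1
}
```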
@@ -217,21 +204,10 @@ pub struct Range {
 }
 
 impl Range {
-    /// Creates a new range with start and end positions
-    ///
-    /// # Parameters
-    /// * `start` - Starting position (line and character)
-    /// * `end` - Ending position (line and character)
     pub fn new(start: Position, end: Position) -> Self {
         Self { start, end }
     }
 
-    /// Creates a range that covers only a single position
-    ///
-    /// Useful for diagnostics that point to a specific location rather than a range
-    ///
-    /// # Parameters
-    /// * `position` - The position to create a single-point range for
     pub fn single_position(position: Position) -> Self {
         Self {
             start: position.clone(),
@@ -290,16 +266,6 @@ pub struct Diagnostic {
 }
 
 impl Diagnostic {
-    /// Creates a new diagnostic with essential information
-    ///
-    /// # Parameters
-    /// * `range` - The source code range the diagnostic applies to
-    /// * `severity` - The severity level of the diagnostic
-    /// * `code` - The diagnostic code indicating the type of issue
-    /// * `message` - A human-readable description of the diagnostic
-    ///
-    /// # Returns
-    /// A new diagnostic with default values for other fields
     pub fn new(
         range: Range,
         severity: DiagnosticSeverity,
@@ -328,11 +294,6 @@ pub struct DiagnosticCollection {
 }
 
 impl DiagnosticCollection {
-    /// Creates a new diagnostic collection for a file
-    ///
-    /// # Parameters
-    /// * `file_path` - Path to the file being analyzed
-    /// * `source_text` - The content of the file
     pub fn new(file_path: String, source_text: String) -> Self {
         Self {
             diagnostics: Vec::new(),
@@ -341,44 +302,26 @@ impl DiagnosticCollection {
         }
     }
 
-    /// Adds multiple diagnostics to the collection
-    ///
-    /// # Parameters
-    /// * `diagnostics` - Vector of diagnostics to add
     pub fn extend(&mut self, diagnostics: Vec<Diagnostic>) {
         self.diagnostics.extend(diagnostics);
     }
 
-    /// Returns an iterator over all error-level diagnostics in the collection
     pub fn errors(&self) -> impl Iterator<Item = &Diagnostic> {
         self.diagnostics
             .iter()
             .filter(|d| d.severity == DiagnosticSeverity::Error)
     }
 
-    /// Checks if the collection contains any error-level diagnostics
     pub fn has_errors(&self) -> bool {
         self.errors().count() > 0
     }
 
-    /// Converts the diagnostic collection to JSON format
-    ///
-    /// Useful for integrating with Language Server Protocol (LSP) clients
-    /// or other tools that consume structured diagnostic data.
-    ///
-    /// # Returns
-    /// A JSON string representation of the diagnostic collection, or an error if serialization fails
+    /// Convert to JSON for LSP or tooling integration
     pub fn to_json(&self) -> serde_json::Result<String> {
         serde_json::to_string_pretty(self)
     }
 
-    /// Converts the diagnostic collection to a human-readable text format
-    ///
-    /// Produces a formatted report suitable for display in a terminal,
-    /// with file locations, error codes, and code snippets for context.
-    ///
-    /// # Returns
-    /// A formatted string containing all diagnostics with relevant context
+    /// Convert to a human-readable format
     pub fn to_human_readable(&self) -> String {
         let mut output = String::new();
 
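For orientation, the collection API touched by this hunk composes as follows. A hedged usage sketch — the function is hypothetical and assumes `Diagnostic`, `DiagnosticCollection`, and `DiagnosticSeverity` are in scope from this module; only the method signatures come from the diff:

```rust
// Hypothetical caller of the DiagnosticCollection API shown above.
fn summarize(file: &str, source: &str, found: Vec<Diagnostic>) -> String {
    let mut collection = DiagnosticCollection::new(file.to_string(), source.to_string());
    collection.extend(found);

    // errors() yields only Error-severity diagnostics; has_errors() is the boolean shortcut.
    let error_count = collection.errors().count();
    if collection.has_errors() {
        format!("{} error(s)\n{}", error_count, collection.to_human_readable())
    } else {
        "no errors".to_string()
    }
}
```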
@@ -416,18 +359,6 @@ impl DiagnosticCollection {
         output
     }
 
-    /// Extracts source code lines around a diagnostic location
-    ///
-    /// Provides context for a diagnostic by showing the lines of code surrounding
-    /// the issue, with line numbers and a marker pointing to the problematic line.
-    ///
-    /// # Parameters
-    /// * `range` - The range in the source code to provide context for
-    /// * `context_lines` - Number of lines to include before and after the range
-    ///
-    /// # Returns
-    /// A vector of formatted strings containing the context lines with line numbers,
-    /// or None if the range is invalid
     fn get_context_lines(&self, range: &Range, context_lines: usize) -> Option<Vec<String>> {
         let lines: Vec<&str> = self.source_text.lines().collect();
         let start_line = range.start.line as usize;
@@ -504,7 +435,9 @@ impl AnalyzerModule for LexicalAnalyzer {
         let mut lexer = NftablesLexer::new(source);
 
         match lexer.tokenize() {
-            Ok(_) => {}
+            Ok(_) => {
+                // No lexical errors
+            }
             Err(lex_error) => {
                 let diagnostic = Self::lex_error_to_diagnostic(&lex_error, source);
                 diagnostics.push(diagnostic);
@@ -520,17 +453,6 @@ impl AnalyzerModule for LexicalAnalyzer {
 }
 
 impl LexicalAnalyzer {
-    /// Converts a lexical error to a diagnostic
-    ///
-    /// Translates lexer-specific errors into the generic diagnostic format
-    /// with appropriate severity, code, and location information.
-    ///
-    /// # Parameters
-    /// * `error` - The lexical error to convert
-    /// * `source` - Source code for position calculation
-    ///
-    /// # Returns
-    /// A diagnostic describing the lexical error
     pub fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
         match error {
             LexError::InvalidToken { position, text } => {
@@ -586,14 +508,18 @@ impl AnalyzerModule for SyntaxAnalyzer {
             Ok(tokens) => {
                 let mut parser = Parser::new(tokens);
                 match parser.parse() {
-                    Ok(_) => {}
+                    Ok(_) => {
+                        // No parse errors
+                    }
                     Err(parse_error) => {
                         let diagnostic = Self::parse_error_to_diagnostic(&parse_error, source);
                         diagnostics.push(diagnostic);
                     }
                 }
             }
-            Err(_) => {}
+            Err(_) => {
+                // Already handled in lexical analysis
+            }
         }
 
         diagnostics
@@ -605,17 +531,6 @@ impl AnalyzerModule for SyntaxAnalyzer {
 }
 
 impl SyntaxAnalyzer {
-    /// Converts a parse error to a diagnostic
-    ///
-    /// Translates parser-specific errors into the generic diagnostic format
-    /// with appropriate severity, code, and meaningful error messages.
-    ///
-    /// # Parameters
-    /// * `error` - The parse error to convert
-    /// * `_source` - Source code for position calculation
-    ///
-    /// # Returns
-    /// A diagnostic describing the syntax error
     fn parse_error_to_diagnostic(error: &ParseError, _source: &str) -> Diagnostic {
         match error {
             ParseError::UnexpectedToken {
@@ -692,9 +607,12 @@ impl AnalyzerModule for StyleAnalyzer {
     fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
+        // Only perform style analysis if enabled
         if !config.enable_style_warnings {
             return diagnostics;
         }
 
+        // Check for missing shebang
         if !source.starts_with("#!") {
             let range = Range::new(Position::new(0, 0), Position::new(0, 0));
             let diagnostic = Diagnostic::new(
@@ -719,11 +637,6 @@ impl AnalyzerModule for StyleAnalyzer {
 }
 
 impl StyleAnalyzer {
-    /// Analyzes line issues in nftables configuration
-    ///
-    /// Detects:
-    /// - Lines exceeding maximum length
-    /// - Trailing whitespace at end of lines
     fn analyze_line_issues(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -767,11 +680,6 @@ impl StyleAnalyzer {
         diagnostics
     }
 
-    /// Analyzes whitespace issues in nftables configuration
-    ///
-    /// Detects:
-    /// - Too many consecutive empty lines
-    /// - Trailing empty lines at end of file
     fn analyze_whitespace_issues(
         &self,
         source: &str,
@@ -828,12 +736,6 @@ impl StyleAnalyzer {
         diagnostics
     }
 
-    /// Analyzes indentation consistency in nftables configuration
-    ///
-    /// Checks for:
-    /// - Mixed tabs and spaces in a single line
-    /// - Inconsistent indentation styles across the file
-    /// - Adherence to preferred indentation style if specified
     fn analyze_indentation(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
         let mut has_tabs = false;
@@ -908,19 +810,23 @@ impl AnalyzerModule for SemanticAnalyzer {
     fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
+        // Always run semantic validation (syntax/semantic errors)
         diagnostics.extend(self.validate_table_declarations(source));
         diagnostics.extend(self.validate_chain_declarations_semantic(source));
         diagnostics.extend(self.validate_cidr_notation(source));
 
+        // Best practices checks (only if enabled)
         if config.enable_best_practices {
             diagnostics.extend(self.validate_chain_best_practices(source));
             diagnostics.extend(self.check_for_redundant_rules(source));
         }
 
+        // Performance hints (only if enabled)
         if config.enable_performance_hints {
             diagnostics.extend(self.check_performance_hints(source));
         }
 
+        // Security warnings (only if enabled)
         if config.enable_security_warnings {
             diagnostics.extend(self.check_security_warnings(source));
         }
@@ -934,11 +840,6 @@ impl AnalyzerModule for SemanticAnalyzer {
 }
 
 impl SemanticAnalyzer {
-    /// Validates table declarations in nftables configuration
-    ///
-    /// Checks for:
-    /// - Valid table family (ip, ip6, inet, arp, bridge, netdev)
-    /// - Duplicate table names
     fn validate_table_declarations(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
         let mut seen_tables = HashSet::new();
@@ -953,8 +854,11 @@ impl SemanticAnalyzer {
                 let family = parts[1];
                 let name = parts[2];
 
+                // Validate family
                 match family {
-                    "ip" | "ip6" | "inet" | "arp" | "bridge" | "netdev" => {}
+                    "ip" | "ip6" | "inet" | "arp" | "bridge" | "netdev" => {
+                        // Valid family
+                    }
                     _ => {
                         let start_col = line.find(family).unwrap_or(0);
                         let range = Range::new(
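The accepted families in this match are the standard nftables table families. As a tiny, runnable restatement of the check the comment annotates (the helper name is mine, not part of the crate):

```rust
// Minimal sketch of the family validation above: only these six keywords
// are treated as valid nftables table families.
fn is_valid_table_family(family: &str) -> bool {
    matches!(family, "ip" | "ip6" | "inet" | "arp" | "bridge" | "netdev")
}

fn main() {
    assert!(is_valid_table_family("inet"));
    assert!(!is_valid_table_family("inet6")); // not an nftables family keyword
}
```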
@@ -996,11 +900,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Validates chain declarations for correct hook types
-    ///
-    /// Checks for valid hooks in chain declarations:
-    /// - input, output, forward, prerouting, postrouting
-    /// - Reports unknown hooks as errors
     fn validate_chain_declarations_semantic(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1009,6 +908,7 @@ impl SemanticAnalyzer {
             let trimmed = line.trim();
 
             if trimmed.starts_with("type ") && trimmed.contains("hook") {
+                // Validate chain type and hook (semantic validation)
                 if let Some(hook_pos) = trimmed.find("hook") {
                     let hook_part = &trimmed[hook_pos..];
                     let hook_words: Vec<&str> = hook_part.split_whitespace().collect();
@@ -1016,7 +916,9 @@ impl SemanticAnalyzer {
                     if hook_words.len() >= 2 {
                         let hook = hook_words[1];
                         match hook {
-                            "input" | "output" | "forward" | "prerouting" | "postrouting" => {}
+                            "input" | "output" | "forward" | "prerouting" | "postrouting" => {
+                                // Valid hook
+                            }
                             _ => {
                                 let start_col = line.find(hook).unwrap_or(0);
                                 let range = Range::new(
@@ -1040,10 +942,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Checks for best practices in nftables chain definitions
-    ///
-    /// Verifies that:
-    /// - Filter chains have explicit policies defined
     fn validate_chain_best_practices(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1051,6 +949,7 @@ impl SemanticAnalyzer {
             let line_num = line_idx as u32;
             let trimmed = line.trim();
 
+            // Check for missing policy in filter chains (best practice)
             if trimmed.contains("type filter") && !trimmed.contains("policy") {
                 let range = Range::new(
                     Position::new(line_num, 0),
@@ -1069,11 +968,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Analyzes nftables configuration for performance optimizations
-    ///
-    /// Checks for:
-    /// - Large sets without timeouts
-    /// - Rules without counters for monitoring
     fn check_performance_hints(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1081,7 +975,9 @@ impl SemanticAnalyzer {
             let line_num = line_idx as u32;
             let trimmed = line.trim();
 
+            // Check for large sets without timeout (performance hint)
             if trimmed.contains("set ") && trimmed.contains("{") && !trimmed.contains("timeout") {
+                // Simple heuristic: if set definition is long, suggest timeout
                 if trimmed.len() > 100 {
                     let range = Range::new(
                         Position::new(line_num, 0),
@@ -1119,10 +1015,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Checks for security risks in nftables configuration
-    ///
-    /// Identifies:
-    /// - Overly permissive rules that accept traffic from anywhere (0.0.0.0/0 or ::/0)
     fn check_security_warnings(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
@@ -1152,62 +1044,35 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Validates CIDR notation in nftables configuration
-    ///
-    /// Checks for:
-    /// - Valid IP address format (both IPv4 and IPv6)
-    /// - Valid prefix length (0-32 for IPv4, 0-128 for IPv6)
-    /// - Valid numeric prefix format
     fn validate_cidr_notation(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
         for (line_idx, line) in source.lines().enumerate() {
             let line_num = line_idx as u32;
 
-            // Look for potential CIDR notation patterns
+            // Simple CIDR validation without regex dependency
             let words: Vec<&str> = line.split_whitespace().collect();
             for word in words {
                 if word.contains('/') && word.chars().any(|c| c.is_ascii_digit()) {
                     if let Some(slash_pos) = word.find('/') {
                         let (ip_part, prefix_part) = word.split_at(slash_pos);
-                        let prefix_part = &prefix_part[1..];
+                        let prefix_part = &prefix_part[1..]; // Remove the '/'
 
-                        match ip_part.parse::<IpAddr>() {
-                            Ok(ip_addr) => match prefix_part.parse::<u8>() {
-                                Ok(prefix) => {
-                                    let max_prefix = match ip_addr {
-                                        IpAddr::V4(_) => 32,
-                                        IpAddr::V6(_) => 128,
-                                    };
-
-                                    if prefix > max_prefix {
-                                        if let Some(start_col) = line.find(word) {
-                                            let range = Range::new(
-                                                Position::new(line_num, start_col as u32),
-                                                Position::new(
-                                                    line_num,
-                                                    (start_col + word.len()) as u32,
-                                                ),
-                                            );
-                                            let ip_type = match ip_addr {
-                                                IpAddr::V4(_) => "IPv4",
-                                                IpAddr::V6(_) => "IPv6",
-                                            };
-                                            let diagnostic = Diagnostic::new(
-                                                range,
-                                                DiagnosticSeverity::Error,
-                                                DiagnosticCode::InvalidCidrNotation,
-                                                format!(
-                                                    "Invalid CIDR prefix length: '{}' (max {} for {})",
-                                                    prefix, max_prefix, ip_type
-                                                ),
-                                            );
-                                            diagnostics.push(diagnostic);
-                                        }
-                                    }
-                                }
-                                Err(_) => {
-                                    if !prefix_part.is_empty() {
+                        // Basic IPv4 validation
+                        let ip_parts: Vec<&str> = ip_part.split('.').collect();
+                        if ip_parts.len() == 4 {
+                            let mut valid_ip = true;
+                            for part in ip_parts {
+                                if part.parse::<u8>().is_err() {
+                                    valid_ip = false;
+                                    break;
+                                }
+                            }
+
+                            // Validate prefix length
+                            if valid_ip {
+                                if let Ok(prefix) = prefix_part.parse::<u8>() {
+                                    if prefix > 32 {
                                         if let Some(start_col) = line.find(word) {
                                             let range = Range::new(
                                                 Position::new(line_num, start_col as u32),
|
||||||
DiagnosticSeverity::Error,
|
DiagnosticSeverity::Error,
|
||||||
DiagnosticCode::InvalidCidrNotation,
|
DiagnosticCode::InvalidCidrNotation,
|
||||||
format!(
|
format!(
|
||||||
"Invalid CIDR prefix: '{}' must be a number",
|
"Invalid CIDR prefix length: '{}' (max 32 for IPv4)",
|
||||||
prefix_part
|
prefix
|
||||||
),
|
),
|
||||||
);
|
);
|
||||||
diagnostics.push(diagnostic);
|
diagnostics.push(diagnostic);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
} else if !prefix_part.is_empty() {
|
||||||
},
|
|
||||||
Err(_) => {
|
|
||||||
if (ip_part.contains('.')
|
|
||||||
&& ip_part.chars().any(|c| c.is_ascii_digit()))
|
|
||||||
|| (ip_part.contains(':')
|
|
||||||
&& ip_part
|
|
||||||
.chars()
|
|
||||||
.any(|c| c.is_ascii_digit() || c.is_ascii_hexdigit()))
|
|
||||||
{
|
|
||||||
if let Some(start_col) = line.find(word) {
|
if let Some(start_col) = line.find(word) {
|
||||||
let range = Range::new(
|
let range = Range::new(
|
||||||
Position::new(line_num, start_col as u32),
|
Position::new(line_num, start_col as u32),
|
||||||
|
|
@ -1250,10 +1106,7 @@ impl SemanticAnalyzer {
|
||||||
range,
|
range,
|
||||||
DiagnosticSeverity::Error,
|
DiagnosticSeverity::Error,
|
||||||
DiagnosticCode::InvalidCidrNotation,
|
DiagnosticCode::InvalidCidrNotation,
|
||||||
format!(
|
format!("Invalid CIDR notation: '{}'", word),
|
||||||
"Invalid IP address in CIDR notation: '{}'",
|
|
||||||
ip_part
|
|
||||||
),
|
|
||||||
);
|
);
|
||||||
diagnostics.push(diagnostic);
|
diagnostics.push(diagnostic);
|
||||||
}
|
}
|
||||||
|
|
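Net effect of these three hunks: the old side validated both address families by parsing `ip_part` with `std::net::IpAddr` and capping the prefix at 32 or 128, while the new side only recognizes dotted-quad IPv4 with a 32-bit cap and reports everything else as generic "Invalid CIDR notation". A self-contained sketch of the stricter, `IpAddr`-based check that the removed code performed, reduced to a standalone helper (the function name and return shape are mine, not the crate's):

```rust
use std::net::IpAddr;

/// Returns an error message for an invalid CIDR word, or None if it looks valid.
/// Sketch of the IpAddr-based validation removed in this diff.
fn check_cidr(word: &str) -> Option<String> {
    let (ip_part, prefix_part) = word.split_once('/')?; // no '/', nothing to check
    match ip_part.parse::<IpAddr>() {
        Ok(ip_addr) => {
            let max_prefix: u8 = match ip_addr {
                IpAddr::V4(_) => 32,
                IpAddr::V6(_) => 128,
            };
            match prefix_part.parse::<u8>() {
                Ok(prefix) if prefix > max_prefix => Some(format!(
                    "Invalid CIDR prefix length: '{}' (max {})",
                    prefix, max_prefix
                )),
                Ok(_) => None,
                Err(_) => Some(format!(
                    "Invalid CIDR prefix: '{}' must be a number",
                    prefix_part
                )),
            }
        }
        Err(_) => Some(format!("Invalid IP address in CIDR notation: '{}'", ip_part)),
    }
}

fn main() {
    assert!(check_cidr("192.168.1.0/24").is_none());
    assert!(check_cidr("2001:db8::/129").is_some()); // IPv6 prefix too large
    assert!(check_cidr("192.168.1.256/24").is_some()); // invalid IPv4 address
}
```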
@@ -1268,10 +1121,6 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    /// Identifies redundant rules in nftables configuration
-    ///
-    /// Detects:
-    /// - Duplicate accept/drop/reject rules
     fn check_for_redundant_rules(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
         let mut seen_rules = HashSet::new();
@@ -1280,10 +1129,12 @@ impl SemanticAnalyzer {
             let line_num = line_idx as u32;
             let trimmed = line.trim();
 
+            // Simple rule detection (lines that contain actions)
             if trimmed.contains(" accept")
                 || trimmed.contains(" drop")
                 || trimmed.contains(" reject")
             {
+                // Check if rule already exists in the HashSet
                 if seen_rules.contains(trimmed) {
                     let range = Range::new(
                         Position::new(line_num, 0),
|
@ -1297,6 +1148,7 @@ impl SemanticAnalyzer {
|
||||||
);
|
);
|
||||||
diagnostics.push(diagnostic);
|
diagnostics.push(diagnostic);
|
||||||
} else {
|
} else {
|
||||||
|
// Add the rule to the HashSet if it's not a duplicate
|
||||||
seen_rules.insert(trimmed.to_string());
|
seen_rules.insert(trimmed.to_string());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@@ -1312,25 +1164,11 @@ pub struct DiagnosticAnalyzer {
 }
 
 impl DiagnosticAnalyzer {
-    /// Creates a new diagnostic analyzer with the specified configuration
-    ///
-    /// # Parameters
-    /// * `config` - Configuration settings for the analyzer
     pub fn new(config: DiagnosticConfig) -> Self {
         Self { config }
     }
 
-    /// Analyzes source code with all standard analysis modules
-    ///
-    /// This is the main entry point for a complete analysis of nftables configurations.
-    /// It runs lexical, syntax, style, and semantic analysis on the provided source.
-    ///
-    /// # Parameters
-    /// * `source` - Source code to analyze
-    /// * `file_path` - Path to the file being analyzed
-    ///
-    /// # Returns
-    /// A collection of all diagnostics found in the source
+    /// Analyze source code with all available modules
     pub fn analyze(&self, source: &str, file_path: &str) -> DiagnosticCollection {
         self.analyze_with_modules(
             source,
@@ -1339,17 +1177,7 @@ impl DiagnosticAnalyzer {
         )
     }
 
-    /// Analyzes source code with specific analysis modules
-    ///
-    /// Allows running only selected analysis modules for more targeted diagnostics.
-    ///
-    /// # Parameters
-    /// * `source` - Source code to analyze
-    /// * `file_path` - Path to the file being analyzed
-    /// * `module_names` - Names of modules to run ("lexical", "syntax", "style", "semantic")
-    ///
-    /// # Returns
-    /// A collection of diagnostics from the selected modules
+    /// Analyze source code with specific modules
     pub fn analyze_with_modules(
         &self,
         source: &str,
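Putting the entry points together, typical use of the analyzer would look roughly like this. A hedged sketch only: the method signatures (`new`, `analyze`, `has_errors`, `to_human_readable`, `to_json`) come from the diff, while the wrapper function, the file name, and the assumption that `DiagnosticConfig` provides a `Default` implementation are mine:

```rust
// Assumes DiagnosticAnalyzer, DiagnosticConfig, and DiagnosticCollection are in scope.
fn lint(source: &str) -> Result<(), Box<dyn std::error::Error>> {
    // DiagnosticConfig::default() is an assumption, not shown in the diff.
    let analyzer = DiagnosticAnalyzer::new(DiagnosticConfig::default());

    // Full run: lexical, syntax, style, and semantic modules.
    let collection: DiagnosticCollection = analyzer.analyze(source, "ruleset.nft");

    if collection.has_errors() {
        eprintln!("{}", collection.to_human_readable());
    }
    println!("{}", collection.to_json()?);
    Ok(())
}
```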
@@ -1430,193 +1258,4 @@ mod tests {
                 .any(|d| d.code == DiagnosticCode::TrailingWhitespace)
         );
     }
-
-    #[test]
-    fn test_cidr_validation_ipv4_valid() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.0/24 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify valid IPv4 CIDR notation doesn't produce errors
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv4_invalid_prefix() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.0/33 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of IPv4 prefix exceeding max (32)
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid CIDR prefix length: '33' (max 32 for IPv4)")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv6_valid() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip6 saddr 2001:db8::/32 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify valid IPv6 CIDR notation doesn't produce errors
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv6_invalid_prefix() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip6 saddr 2001:db8::/129 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of IPv6 prefix exceeding max (128)
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid CIDR prefix length: '129' (max 128 for IPv6)")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_invalid_ip_address() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.256/24 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of invalid IP address format
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid IP address in CIDR notation: '192.168.1.256'")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_invalid_prefix_format() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip saddr 192.168.1.0/abc accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of non-numeric CIDR prefix
-        assert!(diagnostics.iter().any(|d| {
-            d.code == DiagnosticCode::InvalidCidrNotation
-                && d.message
-                    .contains("Invalid CIDR prefix: 'abc' must be a number")
-        }));
-    }
-
-    #[test]
-    fn test_cidr_validation_ipv6_compressed_notation() {
-        let analyzer = SemanticAnalyzer;
-        let source = "ip6 saddr ::1/128 accept";
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify compressed IPv6 notation is properly handled
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_multiple_cidrs() {
-        let analyzer = SemanticAnalyzer;
-        let source = r#"
-            ip saddr 192.168.1.0/24 accept
-            ip6 saddr 2001:db8::/32 accept
-            ip saddr 10.0.0.0/8 drop
-            ip6 saddr fe80::/64 accept
-        "#;
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify multiple valid CIDRs are properly parsed
-        assert!(
-            !diagnostics
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_mixed_valid_invalid() {
-        let analyzer = SemanticAnalyzer;
-        let source = r#"
-            ip saddr 192.168.1.0/24 accept
-            ip saddr 192.168.1.0/33 drop
-            ip6 saddr 2001:db8::/32 accept
-            ip6 saddr 2001:db8::/129 drop
-        "#;
-        let diagnostics = analyzer.validate_cidr_notation(source);
-
-        // Verify detection of specific invalid prefixes in a mixed content
-        let cidr_errors: Vec<_> = diagnostics
-            .iter()
-            .filter(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-            .collect();
-        assert_eq!(cidr_errors.len(), 2);
-
-        // Check for IPv4 error
-        assert!(
-            cidr_errors
-                .iter()
-                .any(|d| d.message.contains("33") && d.message.contains("IPv4"))
-        );
-        // Check for IPv6 error
-        assert!(
-            cidr_errors
-                .iter()
-                .any(|d| d.message.contains("129") && d.message.contains("IPv6"))
-        );
-    }
-
-    #[test]
-    fn test_cidr_validation_edge_cases() {
-        let analyzer = SemanticAnalyzer;
-
-        // Test edge case: maximum allowed prefix lengths
-        let source_ipv4_max = "ip saddr 192.168.1.0/32 accept";
-        let diagnostics_ipv4 = analyzer.validate_cidr_notation(source_ipv4_max);
-        assert!(
-            !diagnostics_ipv4
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-
-        let source_ipv6_max = "ip6 saddr 2001:db8::/128 accept";
-        let diagnostics_ipv6 = analyzer.validate_cidr_notation(source_ipv6_max);
-        assert!(
-            !diagnostics_ipv6
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-
-        // Test edge case: minimum prefix (catch-all address)
-        let source_zero = "ip saddr 0.0.0.0/0 accept";
-        let diagnostics_zero = analyzer.validate_cidr_notation(source_zero);
-        assert!(
-            !diagnostics_zero
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-
-        // Test edge case: IPv6 minimum prefix
-        let source_ipv6_zero = "ip6 saddr ::/0 accept";
-        let diagnostics_ipv6_zero = analyzer.validate_cidr_notation(source_ipv6_zero);
-        assert!(
-            !diagnostics_ipv6_zero
-                .iter()
-                .any(|d| d.code == DiagnosticCode::InvalidCidrNotation)
-        );
-    }
 }
@@ -506,6 +506,7 @@ fn convert_parse_error_to_formatter_error(
             suggestion: None,
         },
         ParseError::MissingToken { expected } => {
+            // Try to find current position from last token
            let (line, column) = if let Some(last_token) = tokens.last() {
                position_from_range(&last_token.range, source)
            } else {
@@ -519,6 +520,7 @@ fn convert_parse_error_to_formatter_error(
             }
         }
         ParseError::InvalidExpression { message } => {
+            // Try to find the current token position
             let (line, column) = find_current_parse_position(tokens, source);
             FormatterError::SyntaxError {
                 line,