Compare commits


No commits in common. "diagnostics" and "main" have entirely different histories.

12 changed files with 291 additions and 3498 deletions


@@ -1,27 +0,0 @@
-[test-groups]
-nff = { max-threads = 1 }
-
-[profile.default]
-# "retries" defines the number of times a test should be retried. If set to a
-# non-zero value, tests that succeed on a subsequent attempt will be marked as
-# flaky. Can be overridden through the `--retries` option.
-retries = 2
-
-# This will display all of fail, retry, slow
-# see https://nexte.st/book/other-options.html?highlight=failure-output#--status-level-and---final-status-level
-status-level = "skip"
-
-# Treat a test that takes longer than this period as slow, and print a message.
-# Given a non-zero positive integer, shutdown the tests when the number periods
-# have passed.
-slow-timeout = { period = "30s", terminate-after = 4 }
-
-# * "immediate-final": output failures as soon as they happen and at the end of
-#   the test run
-failure-output = "immediate-final"
-
-# Do not cancel the test run on the first failure.
-fail-fast = false
-
-test-threads = 2
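For context: cargo-nextest reads this profile automatically from `.config/nextest.toml`. A sketch of how the removed settings used to come into play, assuming a standard nextest install:

```bash
# With the profile above, failing tests were retried twice, late passes were
# reported as flaky, and the run was not cancelled on the first failure.
cargo nextest run

# As the config comment notes, the CLI flag overrides the file setting:
cargo nextest run --retries 0
```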

Cargo.lock (generated)

@@ -2,15 +2,6 @@
 # It is not intended for manual editing.
 version = 4
 
-[[package]]
-name = "aho-corasick"
-version = "1.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
-dependencies = [
- "memchr",
-]
-
 [[package]]
 name = "anstream"
 version = "0.6.13"
@@ -176,12 +167,6 @@ dependencies = [
  "byteorder",
 ]
 
-[[package]]
-name = "glob"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
-
 [[package]]
 name = "hashbrown"
 version = "0.15.3"
@@ -204,12 +189,6 @@ dependencies = [
  "hashbrown",
 ]
 
-[[package]]
-name = "itoa"
-version = "1.0.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
-
 [[package]]
 name = "lazy_static"
 version = "1.5.0"
@@ -266,12 +245,6 @@ dependencies = [
  "logos-codegen",
 ]
 
-[[package]]
-name = "memchr"
-version = "2.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
-
 [[package]]
 name = "nff"
 version = "0.1.0"
@@ -279,37 +252,11 @@ dependencies = [
  "anyhow",
  "clap",
  "cstree",
- "glob",
  "logos",
- "num_enum",
- "regex",
- "serde",
- "serde_json",
  "text-size",
  "thiserror",
 ]
 
-[[package]]
-name = "num_enum"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
-dependencies = [
- "num_enum_derive",
-]
-
-[[package]]
-name = "num_enum_derive"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
-dependencies = [
- "proc-macro-crate",
- "proc-macro2",
- "quote",
- "syn",
-]
-
 [[package]]
 name = "parking_lot"
 version = "0.12.3"
@@ -333,15 +280,6 @@ dependencies = [
  "windows-targets",
 ]
 
-[[package]]
-name = "proc-macro-crate"
-version = "3.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
-dependencies = [
- "toml_edit",
-]
-
 [[package]]
 name = "proc-macro2"
 version = "1.0.95"
@@ -369,29 +307,6 @@ dependencies = [
  "bitflags",
 ]
 
-[[package]]
-name = "regex"
-version = "1.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-automata",
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-automata"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-syntax",
-]
-
 [[package]]
 name = "regex-syntax"
 version = "0.8.5"
@@ -407,12 +322,6 @@ dependencies = [
  "semver",
 ]
 
-[[package]]
-name = "ryu"
-version = "1.0.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
-
 [[package]]
 name = "scopeguard"
 version = "1.2.0"
@@ -425,38 +334,6 @@ version = "1.0.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"
 
-[[package]]
-name = "serde"
-version = "1.0.219"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.219"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.140"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
-dependencies = [
- "itoa",
- "memchr",
- "ryu",
- "serde",
-]
-
 [[package]]
 name = "smallvec"
 version = "1.15.0"
@@ -518,23 +395,6 @@ dependencies = [
  "syn",
 ]
 
-[[package]]
-name = "toml_datetime"
-version = "0.6.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"
-
-[[package]]
-name = "toml_edit"
-version = "0.22.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
-dependencies = [
- "indexmap",
- "toml_datetime",
- "winnow",
-]
-
 [[package]]
 name = "triomphe"
 version = "0.1.14"
@@ -621,12 +481,3 @@ name = "windows_x86_64_msvc"
 version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
-
-[[package]]
-name = "winnow"
-version = "0.7.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec"
-dependencies = [
- "memchr",
-]
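Aside: every crate dropped here served the removed diagnostics subsystem. A quick way to double-check for leftovers after a removal like this (hypothetical invocations; cargo-machete is the same tool removed from the devshell later in this diff):

```bash
# List dependencies declared in Cargo.toml that the code never actually uses
cargo machete

# Show which packages still pull in a crate before deleting it from the tree
cargo tree --invert memchr
```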


@@ -12,8 +12,3 @@ thiserror = "2.0"
 logos = "0.15"
 cstree = "0.12"
 text-size = "1.1"
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-regex = "1.11"
-glob = "0.3"
-num_enum = "0.7"

README.md

@@ -1,33 +1,27 @@
 # nff
 
-This is a high performance, low overhead configuration parser for nftables,
-written in Rust. Syntax-aware parsing allows nff to provide a complete formatter
-_and_ a linter for nftables. With the formatter the goal is to receive possibly
-jumbled up nftables rule files, and output ✨ pretty ✨ human readable output in
-return. The linter on another hand, will demonstrate possible syntax,
-performance or stylistic errors.
-
-The main emphasis, however, is on the syntax-aware formatting with comprehensive
-grammar support.
+This is a high performance configuration parser and written in Rust. The goal is
+to receive possibly jumbled up nftables rule files, and output ✨ pretty ✨
+human readable output in return. The main emphasis is on the syntax-aware
+formatting with comprehensive grammar support. It is a future goal to allow
+editors to hook into nff in order to format your rulesets directly from your
+editor, or as a diagnostics source.
 
 ## Features
 
-> [!NOTE]
-> nff is in its early stages of development. While _most_ of the syntax is
-> supported, I cannot guarantee that _everything_ is supported just yet.
+nff is in its early stages of development. While _most_ of the syntax is
+supported, I cannot guarantee that _everything_ is supported just yet.
 
 ### Core Functionality
 
-Basic functionality of nff that most users will be interested in
-
 - **Syntax-aware formatting** - Deep understanding of nftables grammar with
   semantic preservation
 - **Multi-family support** - Handles `inet`, `ip`, `ip6`, `arp`, `bridge`, and
   `netdev` table families
-- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
-- **Chain properties** - Hooks, priorities (including negative), policies,
-  device bindings
 - **Flexible indentation** - Configurable tabs/spaces with custom depth
+- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
+- **Chain properties** - Hooks, priorities (including negative), policies,
+  device bindings
 
 ### Advanced Features
@@ -36,87 +30,26 @@ Basic functionality of nff that most users will be interested in
 - **Validation** - Syntax checking with precise error locations
 - **Optimization** - Configurable empty line reduction and whitespace control
 
-### Diagnostics & Analysis
-
-- **Comprehensive diagnostics** - Syntax, semantic, style, and best practice
-  analysis
-- **Modular analysis** - Run specific diagnostic modules (`lexical`, `syntax`,
-  `style`, `semantic`)
-- **LSP-compatible output** - JSON format for editor integration
-- **Human-readable reports** - Detailed error messages with context and location
-  information
-- **Configurable severity** - Control which diagnostic categories to
-  enable/disable
-
 ## Usage
 
-### Formatting
-
 ```bash
-# Format a specific file (in place)
-nff format /etc/nftables.conf
-
-# Format all .nft files in current directory (in place)
-nff format
+# Basic formatting
+nff -f /etc/nftables.conf
 
 # Custom indentation (4 spaces)
-nff format config.nft --indent spaces --spaces 4
+nff -f config.nft --indent spaces --spaces 4
 
 # Optimize formatting (reduce empty lines)
-nff format config.nft --optimize
+nff -f config.nft --optimize
 
-# Output to stdout instead of modifying files
-nff format config.nft --stdout
+# Output to file
+nff -f config.nft -o formatted.nft
 
 # Syntax validation only
-nff format config.nft --check
+nff -f config.nft --check
 
 # Debug output for development (or debugging)
-nff format config.nft --debug
-```
-
-### Linting and Diagnostics
-
-```bash
-# Run comprehensive diagnostics on a file
-nff lint /etc/nftables.conf
-
-# Lint all .nft files in current directory
-nff lint
-
-# JSON output for editor integration
-nff lint config.nft --json
-
-# Run specific diagnostic modules
-nff lint config.nft --modules syntax,style
-
-# Available modules: lexical, syntax, style, semantic
-nff lint config.nft --modules semantic
-
-# Configure diagnostic settings (note: flags are enabled by default)
-nff lint config.nft --style-warnings=false --best-practices=false
-
-# Debug output with diagnostics
-nff lint config.nft --debug
-```
-
-### Parsing and CST Inspection
-
-```bash
-# Parse and display CST structure for debugging
-nff parse /etc/nftables.conf
-
-# Show tree structure with indentation
-nff parse config.nft --tree
-
-# Show detailed node information
-nff parse config.nft --verbose
-
-# Combined tree and verbose output
-nff parse config.nft --tree --verbose
-
-# Debug output with tokens and CST validation
-nff parse config.nft --debug
+nff -f config.nft --debug
 ```
 
 ## Architecture
@@ -136,248 +69,11 @@ graph TD
     AST --> Formatter
     Formatter --> Output
     CST --> Formatter
-    Input --> Diagnostics[Diagnostic System]
-    Diagnostics --> LexAnalyzer[Lexical Analyzer]
-    Diagnostics --> SyntaxAnalyzer[Syntax Analyzer]
-    Diagnostics --> StyleAnalyzer[Style Analyzer]
-    Diagnostics --> SemanticAnalyzer[Semantic Analyzer]
-    LexAnalyzer --> DiagOutput[JSON/Human Output]
-    SyntaxAnalyzer --> DiagOutput
-    StyleAnalyzer --> DiagOutput
-    SemanticAnalyzer --> DiagOutput
 ```
 
 ## Installation
 
-Recommended way of installing nff is to use Nix. Add `nff` to your flake inputs,
-and add the package to your `environment.systemPackages`. Alternatively, on
-non-NixOS systems, it is possible to use `nix profile install` to install nff.
+Recommended way of installing nff is to use Nix.
 
-### Editor Integration
-
-> [!TIP]
-> Your editor not here? Open an issue. I can only add support for editors I use
-> but pull requests documenting alternative editor setups are appreciated!
-
-#### Neovim Setup
-
-nff can be integrated into Neovim as a diagnostics source for nftables files.
-Here are several setup approaches:
-
-##### Option 1: Using none-ls
-
-none-ls is the most common method of adding diagnostics sources in Neovim. While
-I recommend using nvim-lint for its simplicity, below instructions document how
-to set up null-ls.
-
-```lua
-local null_ls = require("null-ls")
-
-null_ls.setup({
-  sources = {
-    -- nftables diagnostics
-    null_ls.builtins.diagnostics.nff.with({
-      command = "nff",
-      args = { "lint", "$FILENAME", "--json" },
-      format = "json",
-      check_exit_code = false,
-      filetypes = { "nftables" },
-    }),
-
-    -- nftables formatting
-    null_ls.builtins.formatting.nff.with({
-      command = "nff",
-      args = { "format", "$FILENAME", "--stdout" },
-      filetypes = { "nftables" },
-    }),
-  },
-})
-```
-
-##### Option 2: Using nvim-lint (recommended)
-
-Recommended way of adding nff as a diagnostics source in Neovim. Simple, low
-overhead and not as error-prone as null-ls.
-
-```lua
--- ~/.config/nvim/lua/config/lint.lua
-require('lint').linters.nff = {
-  cmd = 'nff',
-  stdin = false,
-  args = { 'lint', '%s', '--json' },
-  stream = 'stdout',
-  ignore_exitcode = true,
-  parser = function(output)
-    local diagnostics = {}
-    local ok, decoded = pcall(vim.fn.json_decode, output)
-
-    if not ok or not decoded.diagnostics then
-      return diagnostics
-    end
-
-    for _, diagnostic in ipairs(decoded.diagnostics) do
-      table.insert(diagnostics, {
-        lnum = diagnostic.range.start.line,
-        col = diagnostic.range.start.character,
-        severity = diagnostic.severity == "Error" and vim.diagnostic.severity.ERROR or vim.diagnostic.severity.WARN,
-        message = diagnostic.message,
-        source = "nff",
-        code = diagnostic.code,
-      })
-    end
-
-    return diagnostics
-  end,
-}
-
--- Setup linting for nftables files
-vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
-  pattern = "*.nft",
-  callback = function()
-    require("lint").try_lint("nff")
-  end,
-})
-```
-
-##### Option 3: Custom Lua Function
-
-Alternatively, if you don't want to use plugins, consider a setup such as this
-one to do it without any reliance on plugins:
-
-```lua
--- ~/.config/nvim/lua/nff.lua
-local M = {}
-
-function M.lint_nftables()
-  local filename = vim.fn.expand('%:p')
-  if vim.bo.filetype ~= 'nftables' then
-    return
-  end
-
-  local cmd = { 'nff', 'lint', filename, '--json' }
-
-  vim.fn.jobstart(cmd, {
-    stdout_buffered = true,
-    on_stdout = function(_, data)
-      if data then
-        local output = table.concat(data, '\n')
-        local ok, result = pcall(vim.fn.json_decode, output)
-        if ok and result.diagnostics then
-          local diagnostics = {}
-          for _, diag in ipairs(result.diagnostics) do
-            table.insert(diagnostics, {
-              lnum = diag.range.start.line,
-              col = diag.range.start.character,
-              severity = diag.severity == "Error" and vim.diagnostic.severity.ERROR or vim.diagnostic.severity.WARN,
-              message = diag.message,
-              source = "nff",
-            })
-          end
-          vim.diagnostic.set(vim.api.nvim_create_namespace('nff'), 0, diagnostics)
-        end
-      end
-    end,
-  })
-end
-
--- Auto-run on save
-vim.api.nvim_create_autocmd("BufWritePost", {
-  pattern = "*.nft",
-  callback = M.lint_nftables,
-})
-
-return M
-```
-
-## Diagnostic Categories
-
-nff provides comprehensive analysis across multiple categories:
-
-### Syntax Errors
-
-- Parse errors with precise location information
-- Missing tokens (semicolons, braces, etc.)
-- Unexpected tokens
-- Unterminated strings
-- Invalid numbers
-
-### Semantic Validation
-
-- Unknown table families (`inet`, `ip`, `ip6`, etc.)
-- Invalid chain types and hooks
-- Incorrect priority values
-- Missing chain policies
-- Duplicate table/chain names
-- Invalid CIDR notation
-- Invalid port ranges
-
-### Style Warnings
-
-- Missing shebang line
-- Inconsistent indentation (mixed tabs/spaces)
-- Trailing whitespace
-- Lines exceeding maximum length (configurable)
-- Excessive empty lines
-- Preferred syntax alternatives
-
-### Best Practices
-
-- Chains without explicit policies
-- Rules without actions
-- Overly permissive rules
-- Duplicate or conflicting rules
-- Unused variables or sets
-- Deprecated syntax usage
-- Missing documentation
-- Security risks
-
-### Performance Hints
-
-- Inefficient rule ordering
-- Large sets without timeouts
-- Missing counters where beneficial
-
-## JSON Output Format
-
-When using `--json`, nff outputs LSP-compatible diagnostics:
-
-```json
-{
-  "diagnostics": [
-    {
-      "range": {
-        "start": { "line": 5, "character": 10 },
-        "end": { "line": 5, "character": 20 }
-      },
-      "severity": "Error",
-      "code": "NFT001",
-      "source": "nff",
-      "message": "Expected ';' after policy",
-      "related_information": [],
-      "code_actions": [],
-      "tags": []
-    }
-  ],
-  "file_path": "config.nft",
-  "source_text": "..."
-}
-```
-
-### Diagnostic Codes
-
-nff uses structured diagnostic codes for categorization:
-
-- **NFT001-NFT099**: Syntax errors
-- **NFT101-NFT199**: Semantic errors
-- **NFT201-NFT299**: Style warnings
-- **NFT301-NFT399**: Best practice recommendations
-- **NFT401-NFT499**: Performance hints
-- **NFT501-NFT599**: Formatting issues
-- **NFT601-NFT699**: nftables-specific validations
-
 ## Development
@@ -500,95 +196,8 @@ table inet protection {
 }
 ```
 
-## Diagnostics Examples
-
-### Error Detection
-
-Input file with issues:
-
-```nftables
-table inet firewall {
-    chain input {
-        type filter hook input priority 100
-        tcp dport 22 accept
-    }
-}
-```
-
-Human-readable output:
-
-```
-Found 2 issues in config.nft:
-
-config.nft:3:37: error: Expected ';' after policy [NFT001]
-   1: table inet firewall {
-   2:     chain input {
-→  3:         type filter hook input priority 100
-   4:         tcp dport 22 accept
-   5:     }
-
-config.nft:3:1: warning: Filter chain should have an explicit policy [NFT301]
-   1: table inet firewall {
-   2:     chain input {
-→  3:         type filter hook input priority 100
-   4:         tcp dport 22 accept
-   5:     }
-```
-
-JSON output:
-
-```json
-{
-  "diagnostics": [
-    {
-      "range": {
-        "start": { "line": 2, "character": 37 },
-        "end": { "line": 2, "character": 37 }
-      },
-      "severity": "Error",
-      "code": "NFT001",
-      "source": "nff",
-      "message": "Expected ';' after policy"
-    },
-    {
-      "range": {
-        "start": { "line": 2, "character": 0 },
-        "end": { "line": 2, "character": 37 }
-      },
-      "severity": "Warning",
-      "code": "NFT301",
-      "source": "nff",
-      "message": "Filter chain should have an explicit policy"
-    }
-  ],
-  "file_path": "config.nft",
-  "source_text": "..."
-}
-```
-
-### Style Analysis
-
-Input with style issues:
-
-```nftables
-table inet test{chain input{type filter hook input priority 0;policy drop;tcp dport 22 accept;}}
-```
-
-Style warnings:
-
-```
-Found 3 issues in style.nft:
-
-style.nft:1:1: warning: Consider adding a shebang line [NFT201]
-style.nft:1:121: warning: Line too long (98 > 80 characters) [NFT205]
-style.nft:1:16: warning: Missing space after '{' [NFT503]
-```
-
 ## Contributing
 
-### Building
-
-Build with `cargo build` as usual. If you are using Nix, you will also want to
-ensure that the Nix package builds as expected.
-
 ### Code Style
 
 - Follow `cargo fmt` formatting
@@ -603,6 +212,11 @@ ensure that the Nix package builds as expected.
 - **Regression tests**: Known issue prevention
 - **Performance tests**: Benchmark critical paths
 
+### Building
+
+Build with `cargo build` as usual. If you are using Nix, you will also want to
+ensure that the Nix package builds as expected.
+
 ## Technical Notes
 
 ### CST Implementation
@@ -623,17 +237,6 @@ Below are the design goals of nff's architechture.
 - **Memory efficiency**: Streaming token processing where possible
 - **Grammar completeness**: Covers full nftables syntax specification
 
-### Diagnostic Architecture
-
-The diagnostic system uses a modular architecture with specialized analyzers:
-
-- **Modular design**: Each analyzer focuses on specific concerns (lexical,
-  syntax, style, semantic)
-- **Configurable analysis**: Enable/disable specific diagnostic categories
-- **LSP compatibility**: JSON output follows Language Server Protocol standards
-- **Performance optimized**: Concurrent analysis when possible
-- **Extensible**: Easy to add new diagnostic rules and categories
-
 ## License
 
 nff is licensed under [MPL v2.0](LICENSE). See license file for more details on


@@ -4,8 +4,6 @@
   rustfmt,
   clippy,
   cargo,
-  cargo-machete,
-  cargo-nextest,
   rustPlatform,
 }:
 mkShell {
@@ -15,8 +13,6 @@ mkShell {
     rustfmt
     clippy
     cargo
-    cargo-machete
-    cargo-nextest
   ];
 
   RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";


@@ -125,12 +125,6 @@ pub enum Expression {
     // Set expressions
     Set(Vec<Expression>),
 
-    // Vmap expressions (value maps)
-    Vmap {
-        expr: Option<Box<Expression>>,
-        map: Vec<(Expression, Expression)>,
-    },
-
     // Range expressions
     Range {
         start: Box<Expression>,


@@ -4,289 +4,250 @@
 use crate::lexer::{Token, TokenKind};
 use cstree::{RawSyntaxKind, green::GreenNode, util::NodeOrToken};
-use num_enum::{IntoPrimitive, TryFromPrimitive};
 use std::fmt;
 use thiserror::Error;
 
 /// nftables syntax node types
-/// Uses `TryFromPrimitive` for safe conversion from raw values with fallback to `Error`.
-#[derive(
-    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TryFromPrimitive, IntoPrimitive,
-)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[repr(u16)]
 pub enum SyntaxKind {
     // Root and containers
     Root = 0,
-    Table = 1,
-    Chain = 2,
-    Rule = 3,
-    Set = 4,
-    Map = 5,
-    Element = 6,
+    Table,
+    Chain,
+    Rule,
+    Set,
+    Map,
+    Element,
 
     // Expressions
-    Expression = 7,
-    BinaryExpr = 8,
-    UnaryExpr = 9,
-    CallExpr = 10,
-    SetExpr = 11,
-    RangeExpr = 12,
+    Expression,
+    BinaryExpr,
+    UnaryExpr,
+    CallExpr,
+    SetExpr,
+    RangeExpr,
 
     // Statements
-    Statement = 13,
-    IncludeStmt = 14,
-    DefineStmt = 15,
-    FlushStmt = 16,
-    AddStmt = 17,
-    DeleteStmt = 18,
+    Statement,
+    IncludeStmt,
+    DefineStmt,
+    FlushStmt,
+    AddStmt,
+    DeleteStmt,
 
     // Literals and identifiers
-    Identifier = 19,
-    StringLiteral = 20,
-    NumberLiteral = 21,
-    IpAddress = 22,
-    Ipv6Address = 23,
-    MacAddress = 24,
+    Identifier,
+    StringLiteral,
+    NumberLiteral,
+    IpAddress,
+    Ipv6Address,
+    MacAddress,
 
     // Keywords
-    TableKw = 25,
-    ChainKw = 26,
-    RuleKw = 27,
-    SetKw = 28,
-    MapKw = 29,
-    ElementKw = 30,
-    IncludeKw = 31,
-    DefineKw = 32,
-    FlushKw = 33,
-    AddKw = 34,
-    DeleteKw = 35,
-    InsertKw = 36,
-    ReplaceKw = 37,
+    TableKw,
+    ChainKw,
+    RuleKw,
+    SetKw,
+    MapKw,
+    ElementKw,
+    IncludeKw,
+    DefineKw,
+    FlushKw,
+    AddKw,
+    DeleteKw,
+    InsertKw,
+    ReplaceKw,
 
     // Chain types and hooks
-    FilterKw = 38,
-    NatKw = 39,
-    RouteKw = 40,
-    InputKw = 41,
-    OutputKw = 42,
-    ForwardKw = 43,
-    PreroutingKw = 44,
-    PostroutingKw = 45,
+    FilterKw,
+    NatKw,
+    RouteKw,
+    InputKw,
+    OutputKw,
+    ForwardKw,
+    PreroutingKw,
+    PostroutingKw,
 
     // Protocols and families
-    IpKw = 46,
-    Ip6Kw = 47,
-    InetKw = 48,
-    ArpKw = 49,
-    BridgeKw = 50,
-    NetdevKw = 51,
-    TcpKw = 52,
-    UdpKw = 53,
-    IcmpKw = 54,
-    Icmpv6Kw = 55,
+    IpKw,
+    Ip6Kw,
+    InetKw,
+    ArpKw,
+    BridgeKw,
+    NetdevKw,
+    TcpKw,
+    UdpKw,
+    IcmpKw,
+    Icmpv6Kw,
 
     // Match keywords
-    SportKw = 56,
-    DportKw = 57,
-    SaddrKw = 58,
-    DaddrKw = 59,
-    ProtocolKw = 60,
-    NexthdrKw = 61,
-    TypeKw = 62,
-    HookKw = 63,
-    PriorityKw = 64,
-    PolicyKw = 65,
-    IifnameKw = 66,
-    OifnameKw = 67,
-    CtKw = 68,
-    StateKw = 69,
+    SportKw,
+    DportKw,
+    SaddrKw,
+    DaddrKw,
+    ProtocolKw,
+    NexthdrKw,
+    TypeKw,
+    HookKw,
+    PriorityKw,
+    PolicyKw,
+    IifnameKw,
+    OifnameKw,
+    CtKw,
+    StateKw,
 
     // Actions
-    AcceptKw = 70,
-    DropKw = 71,
-    RejectKw = 72,
-    ReturnKw = 73,
-    JumpKw = 74,
-    GotoKw = 75,
-    ContinueKw = 76,
-    LogKw = 77,
-    CommentKw = 78,
+    AcceptKw,
+    DropKw,
+    RejectKw,
+    ReturnKw,
+    JumpKw,
+    GotoKw,
+    ContinueKw,
+    LogKw,
+    CommentKw,
 
     // States
-    EstablishedKw = 79,
-    RelatedKw = 80,
-    NewKw = 81,
-    InvalidKw = 82,
+    EstablishedKw,
+    RelatedKw,
+    NewKw,
+    InvalidKw,
 
     // Operators
-    EqOp = 83,
-    NeOp = 84,
-    LeOp = 85,
-    GeOp = 86,
-    LtOp = 87,
-    GtOp = 88,
+    EqOp,
+    NeOp,
+    LeOp,
+    GeOp,
+    LtOp,
+    GtOp,
 
     // Punctuation
-    LeftBrace = 89,
-    RightBrace = 90,
-    LeftParen = 91,
-    RightParen = 92,
-    LeftBracket = 93,
-    RightBracket = 94,
-    Comma = 95,
-    Semicolon = 96,
-    Colon = 97,
-    Assign = 98,
-    Dash = 99,
-    Slash = 100,
-    Dot = 101,
+    LeftBrace,
+    RightBrace,
+    LeftParen,
+    RightParen,
+    LeftBracket,
+    RightBracket,
+    Comma,
+    Semicolon,
+    Colon,
+    Assign,
+    Dash,
+    Slash,
+    Dot,
 
     // Trivia
-    Whitespace = 102,
-    Newline = 103,
-    Comment = 104,
-    Shebang = 105,
+    Whitespace,
+    Newline,
+    Comment,
+    Shebang,
 
     // Error recovery
-    Error = 106,
-
-    // Protocol keywords for nftables
-    VmapKw = 107,
-    NdRouterAdvertKw = 108,
-    NdNeighborSolicitKw = 109,
-    NdNeighborAdvertKw = 110,
-    EchoRequestKw = 111,
-    DestUnreachableKw = 112,
-    RouterAdvertisementKw = 113,
-    TimeExceededKw = 114,
-    ParameterProblemKw = 115,
-    PacketTooBigKw = 116,
+    Error,
 }
 impl From<TokenKind> for SyntaxKind {
     fn from(kind: TokenKind) -> Self {
-        use TokenKind::*;
         match kind {
-            // Keywords -> Kw variants
-            Table => SyntaxKind::TableKw,
-            Chain => SyntaxKind::ChainKw,
-            Rule => SyntaxKind::RuleKw,
-            Set => SyntaxKind::SetKw,
-            Map => SyntaxKind::MapKw,
-            Element => SyntaxKind::ElementKw,
-            Include => SyntaxKind::IncludeKw,
-            Define => SyntaxKind::DefineKw,
-            Flush => SyntaxKind::FlushKw,
-            Add => SyntaxKind::AddKw,
-            Delete => SyntaxKind::DeleteKw,
-            Insert => SyntaxKind::InsertKw,
-            Replace => SyntaxKind::ReplaceKw,
-
-            // Chain types and hooks
-            Filter => SyntaxKind::FilterKw,
-            Nat => SyntaxKind::NatKw,
-            Route => SyntaxKind::RouteKw,
-            Input => SyntaxKind::InputKw,
-            Output => SyntaxKind::OutputKw,
-            Forward => SyntaxKind::ForwardKw,
-            Prerouting => SyntaxKind::PreroutingKw,
-            Postrouting => SyntaxKind::PostroutingKw,
-
-            // Protocols and families
-            Ip => SyntaxKind::IpKw,
-            Ip6 => SyntaxKind::Ip6Kw,
-            Inet => SyntaxKind::InetKw,
-            Arp => SyntaxKind::ArpKw,
-            Bridge => SyntaxKind::BridgeKw,
-            Netdev => SyntaxKind::NetdevKw,
-            Tcp => SyntaxKind::TcpKw,
-            Udp => SyntaxKind::UdpKw,
-            Icmp => SyntaxKind::IcmpKw,
-            Icmpv6 => SyntaxKind::Icmpv6Kw,
-
-            // Match keywords
-            Sport => SyntaxKind::SportKw,
-            Dport => SyntaxKind::DportKw,
-            Saddr => SyntaxKind::SaddrKw,
-            Daddr => SyntaxKind::DaddrKw,
-            Protocol => SyntaxKind::ProtocolKw,
-            Nexthdr => SyntaxKind::NexthdrKw,
-            Type => SyntaxKind::TypeKw,
-            Hook => SyntaxKind::HookKw,
-            Priority => SyntaxKind::PriorityKw,
-            Policy => SyntaxKind::PolicyKw,
-            Iifname => SyntaxKind::IifnameKw,
-            Oifname => SyntaxKind::OifnameKw,
-            Ct => SyntaxKind::CtKw,
-            State => SyntaxKind::StateKw,
-
-            // Actions
-            Accept => SyntaxKind::AcceptKw,
-            Drop => SyntaxKind::DropKw,
-            Reject => SyntaxKind::RejectKw,
-            Return => SyntaxKind::ReturnKw,
-            Jump => SyntaxKind::JumpKw,
-            Goto => SyntaxKind::GotoKw,
-            Continue => SyntaxKind::ContinueKw,
-            Log => SyntaxKind::LogKw,
-            Comment => SyntaxKind::CommentKw,
-
-            // States
-            Established => SyntaxKind::EstablishedKw,
-            Related => SyntaxKind::RelatedKw,
-            New => SyntaxKind::NewKw,
-            Invalid => SyntaxKind::InvalidKw,
-
-            // Protocol keywords for ICMP/ICMPv6
-            Vmap => SyntaxKind::VmapKw,
-            NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
-            NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
-            NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
-            EchoRequest => SyntaxKind::EchoRequestKw,
-            DestUnreachable => SyntaxKind::DestUnreachableKw,
-            RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
-            TimeExceeded => SyntaxKind::TimeExceededKw,
-            ParameterProblem => SyntaxKind::ParameterProblemKw,
-            PacketTooBig => SyntaxKind::PacketTooBigKw,
-
-            // Operators - direct mapping
-            Eq => SyntaxKind::EqOp,
-            Ne => SyntaxKind::NeOp,
-            Le => SyntaxKind::LeOp,
-            Ge => SyntaxKind::GeOp,
-            Lt => SyntaxKind::LtOp,
-            Gt => SyntaxKind::GtOp,
-
-            // Punctuation - direct mapping
-            LeftBrace => SyntaxKind::LeftBrace,
-            RightBrace => SyntaxKind::RightBrace,
-            LeftParen => SyntaxKind::LeftParen,
-            RightParen => SyntaxKind::RightParen,
-            LeftBracket => SyntaxKind::LeftBracket,
-            RightBracket => SyntaxKind::RightBracket,
-            Comma => SyntaxKind::Comma,
-            Semicolon => SyntaxKind::Semicolon,
-            Colon => SyntaxKind::Colon,
-            Assign => SyntaxKind::Assign,
-            Dash => SyntaxKind::Dash,
-            Slash => SyntaxKind::Slash,
-            Dot => SyntaxKind::Dot,
-
-            // Literals - map data-carrying variants to their types
-            StringLiteral(_) => SyntaxKind::StringLiteral,
-            NumberLiteral(_) => SyntaxKind::NumberLiteral,
-            IpAddress(_) => SyntaxKind::IpAddress,
-            Ipv6Address(_) => SyntaxKind::Ipv6Address,
-            MacAddress(_) => SyntaxKind::MacAddress,
-            Identifier(_) => SyntaxKind::Identifier,
-
-            // Special tokens
-            Newline => SyntaxKind::Newline,
-            CommentLine(_) => SyntaxKind::Comment,
-            Shebang(_) => SyntaxKind::Shebang,
-
-            // Error fallback
-            Error => SyntaxKind::Error,
+            TokenKind::Table => SyntaxKind::TableKw,
+            TokenKind::Chain => SyntaxKind::ChainKw,
+            TokenKind::Rule => SyntaxKind::RuleKw,
+            TokenKind::Set => SyntaxKind::SetKw,
+            TokenKind::Map => SyntaxKind::MapKw,
+            TokenKind::Element => SyntaxKind::ElementKw,
+            TokenKind::Include => SyntaxKind::IncludeKw,
+            TokenKind::Define => SyntaxKind::DefineKw,
+            TokenKind::Flush => SyntaxKind::FlushKw,
+            TokenKind::Add => SyntaxKind::AddKw,
+            TokenKind::Delete => SyntaxKind::DeleteKw,
+            TokenKind::Insert => SyntaxKind::InsertKw,
+            TokenKind::Replace => SyntaxKind::ReplaceKw,
+
+            TokenKind::Filter => SyntaxKind::FilterKw,
+            TokenKind::Nat => SyntaxKind::NatKw,
+            TokenKind::Route => SyntaxKind::RouteKw,
+
+            TokenKind::Input => SyntaxKind::InputKw,
+            TokenKind::Output => SyntaxKind::OutputKw,
+            TokenKind::Forward => SyntaxKind::ForwardKw,
+            TokenKind::Prerouting => SyntaxKind::PreroutingKw,
+            TokenKind::Postrouting => SyntaxKind::PostroutingKw,
+
+            TokenKind::Ip => SyntaxKind::IpKw,
+            TokenKind::Ip6 => SyntaxKind::Ip6Kw,
+            TokenKind::Inet => SyntaxKind::InetKw,
+            TokenKind::Arp => SyntaxKind::ArpKw,
+            TokenKind::Bridge => SyntaxKind::BridgeKw,
+            TokenKind::Netdev => SyntaxKind::NetdevKw,
+            TokenKind::Tcp => SyntaxKind::TcpKw,
+            TokenKind::Udp => SyntaxKind::UdpKw,
+            TokenKind::Icmp => SyntaxKind::IcmpKw,
+            TokenKind::Icmpv6 => SyntaxKind::Icmpv6Kw,
+
+            TokenKind::Sport => SyntaxKind::SportKw,
+            TokenKind::Dport => SyntaxKind::DportKw,
+            TokenKind::Saddr => SyntaxKind::SaddrKw,
+            TokenKind::Daddr => SyntaxKind::DaddrKw,
+            TokenKind::Protocol => SyntaxKind::ProtocolKw,
+            TokenKind::Nexthdr => SyntaxKind::NexthdrKw,
+            TokenKind::Type => SyntaxKind::TypeKw,
+            TokenKind::Hook => SyntaxKind::HookKw,
+            TokenKind::Priority => SyntaxKind::PriorityKw,
+            TokenKind::Policy => SyntaxKind::PolicyKw,
+            TokenKind::Iifname => SyntaxKind::IifnameKw,
+            TokenKind::Oifname => SyntaxKind::OifnameKw,
+            TokenKind::Ct => SyntaxKind::CtKw,
+            TokenKind::State => SyntaxKind::StateKw,
+
+            TokenKind::Accept => SyntaxKind::AcceptKw,
+            TokenKind::Drop => SyntaxKind::DropKw,
+            TokenKind::Reject => SyntaxKind::RejectKw,
+            TokenKind::Return => SyntaxKind::ReturnKw,
+            TokenKind::Jump => SyntaxKind::JumpKw,
+            TokenKind::Goto => SyntaxKind::GotoKw,
+            TokenKind::Continue => SyntaxKind::ContinueKw,
+            TokenKind::Log => SyntaxKind::LogKw,
+            TokenKind::Comment => SyntaxKind::CommentKw,
+
+            TokenKind::Established => SyntaxKind::EstablishedKw,
+            TokenKind::Related => SyntaxKind::RelatedKw,
+            TokenKind::New => SyntaxKind::NewKw,
+            TokenKind::Invalid => SyntaxKind::InvalidKw,
+
+            TokenKind::Eq => SyntaxKind::EqOp,
+            TokenKind::Ne => SyntaxKind::NeOp,
+            TokenKind::Le => SyntaxKind::LeOp,
+            TokenKind::Ge => SyntaxKind::GeOp,
+            TokenKind::Lt => SyntaxKind::LtOp,
+            TokenKind::Gt => SyntaxKind::GtOp,
+
+            TokenKind::LeftBrace => SyntaxKind::LeftBrace,
+            TokenKind::RightBrace => SyntaxKind::RightBrace,
+            TokenKind::LeftParen => SyntaxKind::LeftParen,
+            TokenKind::RightParen => SyntaxKind::RightParen,
+            TokenKind::LeftBracket => SyntaxKind::LeftBracket,
+            TokenKind::RightBracket => SyntaxKind::RightBracket,
+            TokenKind::Comma => SyntaxKind::Comma,
+            TokenKind::Semicolon => SyntaxKind::Semicolon,
+            TokenKind::Colon => SyntaxKind::Colon,
+            TokenKind::Assign => SyntaxKind::Assign,
+            TokenKind::Dash => SyntaxKind::Dash,
+            TokenKind::Slash => SyntaxKind::Slash,
+            TokenKind::Dot => SyntaxKind::Dot,
+
+            TokenKind::StringLiteral(_) => SyntaxKind::StringLiteral,
+            TokenKind::NumberLiteral(_) => SyntaxKind::NumberLiteral,
+            TokenKind::IpAddress(_) => SyntaxKind::IpAddress,
+            TokenKind::Ipv6Address(_) => SyntaxKind::Ipv6Address,
+            TokenKind::MacAddress(_) => SyntaxKind::MacAddress,
+            TokenKind::Identifier(_) => SyntaxKind::Identifier,
+
+            TokenKind::Newline => SyntaxKind::Newline,
+            TokenKind::CommentLine(_) => SyntaxKind::Comment,
+            TokenKind::Shebang(_) => SyntaxKind::Shebang,
+
+            TokenKind::Error => SyntaxKind::Error,
         }
     }
 }
@@ -340,7 +301,7 @@
     }
 
     pub fn from_raw(raw: RawSyntaxKind) -> Self {
-        Self::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)
+        unsafe { std::mem::transmute(raw.0 as u16) }
     }
 }
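Review note: this swap trades a checked conversion for an unchecked one. `transmute` on a `u16` that does not correspond to a declared discriminant is undefined behavior, while the removed num_enum path fell back to `SyntaxKind::Error`. A minimal sketch of the difference, using a hypothetical three-variant enum rather than the full `SyntaxKind` (assumes the num_enum crate):

```rust
use num_enum::TryFromPrimitive;

#[derive(Debug, Clone, Copy, PartialEq, Eq, TryFromPrimitive)]
#[repr(u16)]
enum Kind {
    Root = 0,
    Table = 1,
    Error = 2,
}

fn from_raw_checked(raw: u16) -> Kind {
    // The derive generates TryFrom<u16>; unknown values become a recoverable
    // Err, which we map onto the dedicated Error variant instead of UB.
    Kind::try_from(raw).unwrap_or(Kind::Error)
}

fn main() {
    assert_eq!(from_raw_checked(1), Kind::Table);
    assert_eq!(from_raw_checked(999), Kind::Error); // out of range: safe fallback
    // By contrast, `unsafe { std::mem::transmute::<u16, Kind>(999) }`
    // would be immediate undefined behavior.
}
```

If every `RawSyntaxKind` fed into `from_raw` originates from this same enum, the transmute is sound in practice, but the checked fallback costs little and keeps the function total.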
@@ -358,7 +319,7 @@ pub enum CstError {
 /// Result type for CST operations
 pub type CstResult<T> = Result<T, CstError>;
 
-/// CST builder for nftables syntax
+/// Basic CST builder
 pub struct CstBuilder;
 
 impl CstBuilder {
@@ -381,79 +342,6 @@ impl CstBuilder {
         Self::validate_tree(&tree)?;
         Ok(tree)
     }
 
-    /// Display CST in a human-readable format for debugging
-    pub fn display_tree(node: &GreenNode, tree_format: bool, verbose: bool) -> String {
-        let mut output = String::new();
-        Self::display_node_recursive(node, 0, tree_format, verbose, &mut output);
-        output
-    }
-
-    fn display_node_recursive(
-        node: &GreenNode,
-        indent_level: usize,
-        tree_format: bool,
-        verbose: bool,
-        output: &mut String,
-    ) {
-        let kind = SyntaxKind::from_raw(node.kind());
-        let indent = if tree_format {
-            "  ".repeat(indent_level)
-        } else {
-            String::new()
-        };
-
-        if tree_format {
-            output.push_str(&format!("{}├─ {}", indent, kind));
-        } else {
-            output.push_str(&format!("{}{}", indent, kind));
-        }
-
-        if verbose {
-            output.push_str(&format!(
-                " (kind: {:?}, width: {:?})",
-                node.kind(),
-                node.text_len()
-            ));
-        }
-
-        output.push('\n');
-
-        // Display children
-        for child in node.children() {
-            match child {
-                NodeOrToken::Node(child_node) => {
-                    Self::display_node_recursive(
-                        child_node,
-                        indent_level + 1,
-                        tree_format,
-                        verbose,
-                        output,
-                    );
-                }
-                NodeOrToken::Token(token) => {
-                    let token_indent = if tree_format {
-                        "  ".repeat(indent_level + 1)
-                    } else {
-                        String::new()
-                    };
-                    let token_kind = SyntaxKind::from_raw(token.kind());
-                    if tree_format {
-                        output.push_str(&format!("{}├─ {}", token_indent, token_kind));
-                    } else {
-                        output.push_str(&format!("{}{}", token_indent, token_kind));
-                    }
-                    if verbose {
-                        output.push_str(&format!(" (width: {:?})", token.text_len()));
-                    }
-                    output.push('\n');
-                }
-            }
-        }
-    }
 }
 
 /// Internal tree builder that constructs CST according to nftables grammar
@@ -1320,7 +1208,7 @@ mod tests {
         let mut lexer = NftablesLexer::new(source);
         let tokens = lexer.tokenize().expect("Tokenization should succeed");
 
-        // Test CST construction with basic table syntax
+        // CST is now implemented - test that it works
         let green_tree = CstBuilder::build_tree(&tokens);
 
         // Verify the tree was created successfully
@@ -1337,88 +1225,4 @@
         let cst_result = CstBuilder::parse_to_cst(&tokens);
         assert!(cst_result.is_ok());
     }
 
-    #[test]
-    fn test_num_enum_improvements() {
-        // Test that from_raw uses num_enum for conversion
-        // Invalid values fall back to Error variant
-
-        // Test valid conversions
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(0)), SyntaxKind::Root);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1)), SyntaxKind::Table);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(25)), SyntaxKind::TableKw);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(106)), SyntaxKind::Error);
-        assert_eq!(
-            SyntaxKind::from_raw(RawSyntaxKind(116)),
-            SyntaxKind::PacketTooBigKw
-        );
-
-        // Test invalid values automatically fall back to Error
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(999)), SyntaxKind::Error);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1000)), SyntaxKind::Error);
-
-        // Test bidirectional conversion
-        for variant in [
-            SyntaxKind::Root,
-            SyntaxKind::Table,
-            SyntaxKind::TableKw,
-            SyntaxKind::Error,
-            SyntaxKind::PacketTooBigKw,
-        ] {
-            let raw = variant.to_raw();
-            let converted_back = SyntaxKind::from_raw(raw);
-            assert_eq!(variant, converted_back);
-        }
-    }
-
-    #[test]
-    fn test_token_kind_conversion_improvements() {
-        // Test that From<TokenKind> conversion is complete and correct
-        use crate::lexer::TokenKind;
-
-        // Test keyword mappings
-        assert_eq!(SyntaxKind::from(TokenKind::Table), SyntaxKind::TableKw);
-        assert_eq!(SyntaxKind::from(TokenKind::Chain), SyntaxKind::ChainKw);
-        assert_eq!(SyntaxKind::from(TokenKind::Accept), SyntaxKind::AcceptKw);
-
-        // Test operators
-        assert_eq!(SyntaxKind::from(TokenKind::Eq), SyntaxKind::EqOp);
-        assert_eq!(SyntaxKind::from(TokenKind::Lt), SyntaxKind::LtOp);
-
-        // Test punctuation
-        assert_eq!(
-            SyntaxKind::from(TokenKind::LeftBrace),
-            SyntaxKind::LeftBrace
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::Semicolon),
-            SyntaxKind::Semicolon
-        );
-
-        // Test literals (with data)
-        assert_eq!(
-            SyntaxKind::from(TokenKind::StringLiteral("test".to_string())),
-            SyntaxKind::StringLiteral
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::NumberLiteral(42)),
-            SyntaxKind::NumberLiteral
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::IpAddress("192.168.1.1".to_string())),
-            SyntaxKind::IpAddress
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::Identifier("test".to_string())),
-            SyntaxKind::Identifier
-        );
-
-        // Test special tokens
-        assert_eq!(SyntaxKind::from(TokenKind::Newline), SyntaxKind::Newline);
-        assert_eq!(
-            SyntaxKind::from(TokenKind::CommentLine("# comment".to_string())),
-            SyntaxKind::Comment
-        );
-        assert_eq!(SyntaxKind::from(TokenKind::Error), SyntaxKind::Error);
-    }
 }

File diff suppressed because it is too large


@@ -10,8 +10,8 @@ pub enum LexError {
     InvalidToken { position: usize, text: String },
     #[error("Unterminated string literal starting at position {position}")]
     UnterminatedString { position: usize },
-    #[error("Invalid numeric literal at position {position}: {text}")]
-    InvalidNumber { position: usize, text: String },
+    #[error("Invalid numeric literal: {text}")]
+    InvalidNumber { text: String },
 }
 
 /// Result type for lexical analysis
@@ -129,28 +129,6 @@ pub enum TokenKind {
     #[token("new")]
     New,
 
-    // Additional protocol keywords
-    #[token("vmap")]
-    Vmap,
-    #[token("nd-router-advert")]
-    NdRouterAdvert,
-    #[token("nd-neighbor-solicit")]
-    NdNeighborSolicit,
-    #[token("nd-neighbor-advert")]
-    NdNeighborAdvert,
-    #[token("echo-request")]
-    EchoRequest,
-    #[token("destination-unreachable")]
-    DestUnreachable,
-    #[token("router-advertisement")]
-    RouterAdvertisement,
-    #[token("time-exceeded")]
-    TimeExceeded,
-    #[token("parameter-problem")]
-    ParameterProblem,
-    #[token("packet-too-big")]
-    PacketTooBig,
-
     // Actions
     #[token("accept")]
     Accept,
@@ -356,7 +334,6 @@ impl<'a> NftablesLexer<'a> {
             .any(|c| !c.is_ascii_digit() && c != '.' && c != 'x' && c != 'X')
         {
             return Err(LexError::InvalidNumber {
-                position: span.start,
                 text: text.to_owned(),
             });
         } else {
@@ -449,38 +426,4 @@ mod tests {
             panic!("Expected InvalidToken error");
         }
     }
 
-    #[test]
-    fn test_invalid_number_with_position() {
-        // Test that we can create a proper diagnostic with position information
-        use crate::diagnostic::LexicalAnalyzer;
-
-        // Create a source with the same invalid pattern at different positions
-        let source = "123abc normal 123abc end";
-
-        // Since normal tokenization splits "123abc" into "123" + "abc",
-        // let's test the diagnostic creation directly with a mock error
-        let error1 = LexError::InvalidNumber {
-            position: 0,
-            text: "123abc".to_string(),
-        };
-        let error2 = LexError::InvalidNumber {
-            position: 14,
-            text: "123abc".to_string(),
-        };
-
-        // Test that diagnostics are created with correct positions
-        let diagnostic1 = LexicalAnalyzer::lex_error_to_diagnostic(&error1, source);
-        let diagnostic2 = LexicalAnalyzer::lex_error_to_diagnostic(&error2, source);
-
-        // First occurrence should be at position 0
-        assert_eq!(diagnostic1.range.start.line, 0);
-        assert_eq!(diagnostic1.range.start.character, 0);
-        assert_eq!(diagnostic1.message, "Invalid number: '123abc'");
-
-        // Second occurrence should be at position 14 (not 0)
-        assert_eq!(diagnostic2.range.start.line, 0);
-        assert_eq!(diagnostic2.range.start.character, 14);
-        assert_eq!(diagnostic2.message, "Invalid number: '123abc'");
-    }
 }


@@ -1,21 +1,18 @@
 mod ast;
 mod cst;
-mod diagnostic;
 mod lexer;
 mod parser;
 mod syntax;
 
 use anyhow::{Context, Result};
-use clap::{Parser, Subcommand};
-use glob::glob;
+use clap::Parser;
 use std::fs;
 use std::io::{self, Write};
 use std::path::Path;
 use thiserror::Error;
 
 use crate::cst::CstBuilder;
-use crate::diagnostic::{DiagnosticAnalyzer, DiagnosticConfig};
-use crate::lexer::{NftablesLexer, Token, TokenKind};
+use crate::lexer::NftablesLexer;
 use crate::parser::Parser as NftablesParser;
 use crate::syntax::{FormatConfig, IndentStyle, NftablesFormatter};
@@ -27,213 +24,51 @@ enum FormatterError {
     InvalidFile(String),
     #[error("Parse error: {0}")]
     ParseError(String),
-    #[error("Syntax error at line {line}, column {column}: {message}")]
-    SyntaxError {
-        line: usize,
-        column: usize,
-        message: String,
-        suggestion: Option<String>,
-    },
     #[error("IO error: {0}")]
     Io(#[from] io::Error),
 }
 
-#[derive(Error, Debug)]
-enum LintError {
-    #[error("Lint errors found in {file_count} file(s)")]
-    DiagnosticErrors { file_count: usize },
-    #[error("File discovery error: {0}")]
-    FileDiscovery(#[from] anyhow::Error),
-}
-
-#[derive(Parser, Debug, Clone)]
+#[derive(Parser, Debug)]
 #[command(
     name = "nff",
     version = "0.1.0",
-    about = "A high-quality nftables formatter and linter",
-    long_about = "nff (nftables formatter) is a tool for formatting and linting nftables configuration files with proper indentation and structure."
+    about = "A high-quality nftables formatter and beautifier",
+    long_about = "nff (nftables formatter) is a tool for formatting and beautifying nftables configuration files with proper indentation and structure."
 )]
 struct Args {
-    #[command(subcommand)]
-    command: Commands,
+    /// nftables config file (e.g: /etc/nftables.conf)
+    #[arg(short, long, value_name = "FILE")]
+    file: String,
+
+    /// Type of indentation
+    #[arg(short, long, default_value = "tabs", value_parser = clap::value_parser!(IndentStyle))]
+    indent: IndentStyle,
+
+    /// Output file (writes to stdout if not specified)
+    #[arg(short, long, value_name = "FILE")]
+    output: Option<String>,
+
+    /// Optimize output by removing excessive empty lines
+    #[arg(long)]
+    optimize: bool,
+
+    /// Number of spaces per indentation level (only used with --indent=spaces)
+    #[arg(long, default_value = "2", value_name = "N")]
+    spaces: usize,
 
     /// Show debug information (tokens, AST, etc.)
-    #[arg(long, global = true)]
+    #[arg(long)]
     debug: bool,
-}
-
-#[derive(Subcommand, Debug, Clone)]
-enum Commands {
-    /// Format nftables configuration files
-    Format {
-        /// nftables config file (e.g: /etc/nftables.conf). If not provided, formats all .nft files in current directory
-        #[arg(value_name = "FILE")]
-        file: Option<String>,
-
-        /// Type of indentation
-        #[arg(short, long, default_value = "tabs", value_parser = clap::value_parser!(IndentStyle))]
-        indent: IndentStyle,
-
-        /// Print formatted output to stdout instead of modifying files in place
-        #[arg(long)]
-        stdout: bool,
-
-        /// Optimize output by removing excessive empty lines
-        #[arg(long)]
-        optimize: bool,
-
-        /// Number of spaces per indentation level (only used with --indent=spaces)
-        #[arg(long, default_value = "2", value_name = "N")]
-        spaces: usize,
-
-        /// Check syntax only, don't format
-        #[arg(long)]
-        check: bool,
-    },
-    /// Lint nftables configuration files and show diagnostics
-    Lint {
-        /// nftables config file (e.g: /etc/nftables.conf). If not provided, lints all .nft files in current directory
-        #[arg(value_name = "FILE")]
-        file: Option<String>,
-
-        /// Output diagnostics in JSON format (useful for tooling integration)
-        #[arg(long)]
-        json: bool,
-
-        /// Include style warnings in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
-        style_warnings: bool,
-
-        /// Include best practice recommendations in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
-        best_practices: bool,
-
-        /// Include performance hints in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
-        performance_hints: bool,
-
-        /// Include security warnings in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
-        security_warnings: bool,
-
-        /// Diagnostic modules to run (comma-separated: lexical,syntax,style,semantic)
-        #[arg(long, value_delimiter = ',')]
-        modules: Option<Vec<String>>,
-    },
-    /// Parse and display file in CST format for debugging
-    Parse {
-        /// nftables config file to parse
-        #[arg(value_name = "FILE")]
-        file: String,
-
-        /// Show tree structure with indentation
-        #[arg(long)]
-        tree: bool,
-
-        /// Show detailed node information
-        #[arg(long)]
-        verbose: bool,
-    },
-}
-
-fn discover_nftables_files() -> Result<Vec<String>> {
-    let mut files = Vec::new();
-
-    // Common nftables file patterns
-    let patterns = [
-        "*.nft",
-        "*.nftables",
-        "/etc/nftables.conf",
-        "/etc/nftables/*.nft",
-    ];
-
-    for pattern in &patterns {
-        match glob(pattern) {
-            Ok(paths) => {
-                for entry in paths {
-                    match entry {
-                        Ok(path) => {
-                            if path.is_file() {
-                                if let Some(path_str) = path.to_str() {
-                                    files.push(path_str.to_string());
-                                }
-                            }
-                        }
-                        Err(e) => eprintln!("Warning: Error reading path: {}", e),
-                    }
-                }
-            }
-            Err(e) => {
-                // Only warn for non-current directory patterns
-                if !pattern.starts_with("*.") {
-                    eprintln!("Warning: Failed to search pattern {}: {}", pattern, e);
-                }
-            }
-        }
-    }
-
-    if files.is_empty() {
-        return Err(anyhow::anyhow!(
-            "No nftables files found. Please specify a file explicitly or ensure .nft/.nftables files exist in the current directory."
-        ));
-    }
-
-    // Remove duplicates and sort
-    files.sort();
-    files.dedup();
-    Ok(files)
-}
-
-fn process_format_command(
-    file: Option<String>,
-    indent: IndentStyle,
-    stdout: bool,
-    optimize: bool,
-    spaces: usize,
-    check: bool,
-    debug: bool,
-) -> Result<()> {
-    let files = match file {
-        Some(f) => vec![f],
-        None => discover_nftables_files()?,
-    };
-
-    let is_multiple_files = files.len() > 1;
-
-    for file_path in files {
-        if let Err(e) = process_single_file_format(
-            &file_path,
-            indent,
-            stdout,
-            optimize,
-            spaces,
-            check,
-            debug,
-            is_multiple_files,
-        ) {
-            eprintln!("Error processing {}: {}", file_path, e);
-            if !is_multiple_files {
-                return Err(e);
-            }
-        }
-    }
-
-    Ok(())
-}
-
-fn process_single_file_format(
-    file: &str,
-    indent: IndentStyle,
-    stdout: bool,
-    optimize: bool,
-    spaces: usize,
-    check: bool,
-    debug: bool,
-    is_multiple_files: bool,
-) -> Result<()> {
-    let path = Path::new(&file);
+
+    /// Check syntax only, don't format
+    #[arg(long)]
+    check: bool,
+}
+
+fn process_nftables_config(args: Args) -> Result<()> {
+    let path = Path::new(&args.file);
     if !path.exists() {
-        return Err(FormatterError::FileNotFound(file.to_string()).into());
+        return Err(FormatterError::FileNotFound(args.file).into());
     }
 
     if !path.is_file() {
@@ -241,12 +76,12 @@ fn process_single_file_format(
     }
 
     // Read file contents
-    let source =
-        fs::read_to_string(file).with_context(|| format!("Failed to read file: {}", file))?;
+    let source = fs::read_to_string(&args.file)
+        .with_context(|| format!("Failed to read file: {}", args.file))?;
 
     // Tokenize
     let mut lexer = NftablesLexer::new(&source);
-    let tokens = if debug {
+    let tokens = if args.debug {
         // Use error-recovery tokenization for debug mode
         lexer.tokenize_with_errors()
     } else {
@@ -255,7 +90,7 @@
             .map_err(|e| FormatterError::ParseError(e.to_string()))?
     };
 
-    if debug {
+    if args.debug {
         eprintln!("=== TOKENS ===");
         for (i, token) in tokens.iter().enumerate() {
             eprintln!(
@@ -282,7 +117,7 @@
     }
 
     // Parse
-    let ruleset = if debug {
+    let ruleset = if args.debug {
         // Use error-recovery parsing for debug mode
        let (parsed_ruleset, errors) = NftablesParser::parse_with_errors(&source);
         if !errors.is_empty() {
@ -292,524 +127,64 @@ fn process_single_file_format(
} }
eprintln!(); eprintln!();
} }
parsed_ruleset.unwrap_or_else(crate::ast::Ruleset::new) parsed_ruleset.unwrap_or_else(|| crate::ast::Ruleset::new())
} else { } else {
let mut parser = NftablesParser::new(tokens.clone()); let mut parser = NftablesParser::new(tokens.clone());
parser parser
.parse() .parse()
.map_err(|e| convert_parse_error_to_formatter_error(&e, &source, &tokens))? .map_err(|e| FormatterError::ParseError(e.to_string()))?
}; };
if debug { if args.debug {
eprintln!("=== AST ==="); eprintln!("=== AST ===");
eprintln!("{:#?}", ruleset); eprintln!("{:#?}", ruleset);
eprintln!(); eprintln!();
} }
if check { if args.check {
println!("Syntax check passed for: {}", file); println!("Syntax check passed for: {}", args.file);
return Ok(()); return Ok(());
} }
// Format // Format
let config = FormatConfig { let config = FormatConfig {
indent_style: indent, indent_style: args.indent,
spaces_per_level: spaces, spaces_per_level: args.spaces,
optimize, optimize: args.optimize,
max_empty_lines: if optimize { 1 } else { 2 }, max_empty_lines: if args.optimize { 1 } else { 2 },
}; };
let formatter = NftablesFormatter::new(config); let formatter = NftablesFormatter::new(config);
let formatted_output = formatter.format_ruleset(&ruleset); let formatted_output = formatter.format_ruleset(&ruleset);
// Write output // Write output
if stdout { match &args.output {
// Output to stdout Some(output_file) => {
if is_multiple_files { fs::write(output_file, &formatted_output)
println!("=== {} ===", file); .with_context(|| format!("Failed to write to output file: {}", output_file))?;
println!("Formatted output written to: {}", output_file);
} }
io::stdout() None => {
.write_all(formatted_output.as_bytes()) io::stdout()
.with_context(|| "Failed to write to stdout")?; .write_all(formatted_output.as_bytes())
} else { .with_context(|| "Failed to write to stdout")?;
// Format in place
fs::write(file, &formatted_output)
.with_context(|| format!("Failed to write formatted content back to: {}", file))?;
if is_multiple_files || debug {
println!("Formatted: {}", file);
} }
} }
Ok(()) Ok(())
} }
fn process_lint_command(
file: Option<String>,
json: bool,
style_warnings: bool,
best_practices: bool,
performance_hints: bool,
security_warnings: bool,
modules: Option<Vec<String>>,
debug: bool,
) -> Result<()> {
let files = match file {
Some(f) => vec![f],
None => discover_nftables_files()?,
};
let is_multiple_files = files.len() > 1;
let mut error_file_count = 0;
for file_path in files {
if let Err(e) = process_single_file_lint(
&file_path,
json,
style_warnings,
best_practices,
performance_hints,
security_warnings,
modules.as_ref(),
debug,
is_multiple_files,
) {
eprintln!("Error processing {}: {}", file_path, e);
error_file_count += 1;
if !is_multiple_files {
return Err(e);
}
}
}
if error_file_count > 0 {
return Err(LintError::DiagnosticErrors {
file_count: error_file_count,
}
.into());
}
Ok(())
}
fn process_single_file_lint(
file: &str,
json: bool,
style_warnings: bool,
best_practices: bool,
performance_hints: bool,
security_warnings: bool,
modules: Option<&Vec<String>>,
debug: bool,
is_multiple_files: bool,
) -> Result<()> {
let path = Path::new(&file);
if !path.exists() {
return Err(FormatterError::FileNotFound(file.to_string()).into());
}
if !path.is_file() {
return Err(FormatterError::InvalidFile("Not a regular file".to_string()).into());
}
// Read file contents
let source =
fs::read_to_string(file).with_context(|| format!("Failed to read file: {}", file))?;
if debug {
// Tokenize for debug output
let mut lexer = NftablesLexer::new(&source);
let tokens = lexer.tokenize_with_errors();
eprintln!("=== TOKENS ===");
for (i, token) in tokens.iter().enumerate() {
eprintln!(
"{:3}: {:?} @ {:?} = '{}'",
i, token.kind, token.range, token.text
);
}
eprintln!();
// Build and validate CST
eprintln!("=== CST ===");
let cst_tree = CstBuilder::build_tree(&tokens);
match CstBuilder::validate_tree(&cst_tree) {
Ok(()) => eprintln!("CST validation passed"),
Err(e) => eprintln!("CST validation error: {}", e),
}
eprintln!();
}
// Run diagnostics
let diagnostic_config = DiagnosticConfig {
enable_style_warnings: style_warnings,
enable_best_practices: best_practices,
enable_performance_hints: performance_hints,
enable_security_warnings: security_warnings,
max_line_length: 120,
max_empty_lines: 2,
preferred_indent: None, // Don't enforce indent style in lint mode
};
let analyzer = DiagnosticAnalyzer::new(diagnostic_config);
let diagnostics = if let Some(modules) = &modules {
let module_names: Vec<&str> = modules.iter().map(|s| s.as_str()).collect();
analyzer.analyze_with_modules(&source, file, &module_names)
} else {
analyzer.analyze(&source, file)
};
if json {
// Output JSON format for tooling integration
match diagnostics.to_json() {
Ok(json) => println!("{}", json),
Err(e) => {
// Even JSON serialization errors should be in JSON format when --json is used
let error_json = format!(r#"{{"error": "JSON serialization failed: {}"}}"#, e);
println!("{}", error_json);
}
}
} else {
// Output human-readable format
if is_multiple_files {
println!("=== {} ===", file);
}
println!("{}", diagnostics.to_human_readable());
}
// Return error if there are diagnostics errors
if diagnostics.has_errors() {
return Err(anyhow::anyhow!("Diagnostics found errors in file"));
}
Ok(())
}
/// Convert parser errors to formatter errors with proper location information
fn convert_parse_error_to_formatter_error(
error: &crate::parser::ParseError,
source: &str,
tokens: &[Token],
) -> FormatterError {
use crate::parser::ParseError;
match error {
ParseError::UnexpectedToken {
line,
column,
expected,
found,
} => FormatterError::SyntaxError {
line: *line,
column: *column,
message: format!("Expected {}, found '{}'", expected, found),
suggestion: None,
},
ParseError::MissingToken { expected } => {
let (line, column) = if let Some(last_token) = tokens.last() {
position_from_range(&last_token.range, source)
} else {
(1, 1)
};
FormatterError::SyntaxError {
line,
column,
message: format!("Missing token: expected {}", expected),
suggestion: None,
}
}
ParseError::InvalidExpression { message } => {
let (line, column) = find_current_parse_position(tokens, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid expression: {}", message),
suggestion: None,
}
}
ParseError::InvalidStatement { message } => {
let (line, column) = find_current_parse_position(tokens, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid statement: {}", message),
suggestion: None,
}
}
ParseError::SemanticError { message } => {
let (line, column) = find_current_parse_position(tokens, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Semantic error: {}", message),
suggestion: None,
}
}
ParseError::LexError(lex_error) => {
// Convert lexical errors to formatter errors with location
convert_lex_error_to_formatter_error(lex_error, source)
}
ParseError::AnyhowError(anyhow_error) => {
// For anyhow errors, try to extract location from error message and context
let error_msg = anyhow_error.to_string();
let (line, column) = find_error_location_from_context(&error_msg, tokens, source);
let suggestion = generate_suggestion_for_error(&error_msg);
FormatterError::SyntaxError {
line,
column,
message: error_msg,
suggestion,
}
}
}
}
/// Find the current parsing position from tokens
fn find_current_parse_position(tokens: &[Token], source: &str) -> (usize, usize) {
// Look for the last token that is not a newline or a line comment
for token in tokens.iter().rev() {
match token.kind {
TokenKind::Newline | TokenKind::CommentLine(_) => continue,
_ => return position_from_range(&token.range, source),
}
}
(1, 1) // fallback
}
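// e.g. for a token stream ending in [Identifier("saddr"), CommentLine, Newline],
// the newline and comment are skipped and the identifier's position is
// reported; an empty or all-comment stream falls back to (1, 1).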
/// Convert lexical errors to formatter errors
fn convert_lex_error_to_formatter_error(
lex_error: &crate::lexer::LexError,
source: &str,
) -> FormatterError {
use crate::lexer::LexError;
match lex_error {
LexError::InvalidToken { position, text } => {
let (line, column) = offset_to_line_column(*position, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid token: '{}'", text),
suggestion: None,
}
}
LexError::UnterminatedString { position } => {
let (line, column) = offset_to_line_column(*position, source);
FormatterError::SyntaxError {
line,
column,
message: "Unterminated string literal".to_string(),
suggestion: Some("Add closing quote".to_string()),
}
}
LexError::InvalidNumber { position, text } => {
let (line, column) = offset_to_line_column(*position, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid number: '{}'", text),
suggestion: Some("Check number format".to_string()),
}
}
}
}
/// Convert byte offset to line/column position
fn offset_to_line_column(offset: usize, source: &str) -> (usize, usize) {
let mut line = 1;
let mut column = 1;
for (i, ch) in source.char_indices() {
if i >= offset {
break;
}
if ch == '\n' {
line += 1;
column = 1;
} else {
column += 1;
}
}
(line, column)
}
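// Worked example (hypothetical input): offset_to_line_column(3, "ab\ncd")
// counts 'a', 'b', then '\n' and stops at 'c', returning (2, 1). Columns are
// counted in chars rather than bytes, so a multi-byte UTF-8 character still
// advances the column by exactly one.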
/// Find error location from context clues in the error message
fn find_error_location_from_context(
error_msg: &str,
tokens: &[Token],
source: &str,
) -> (usize, usize) {
// Look for context clues in the error message
if error_msg.contains("Expected string or identifier, got:") {
// Find the problematic token mentioned in the error
if let Some(bad_token_text) = extract_token_from_error_message(error_msg) {
// Find this token in the token stream
for token in tokens {
if token.text == bad_token_text {
return position_from_range(&token.range, source);
}
}
}
}
// Fallback to finding last meaningful token
find_current_parse_position(tokens, source)
}
/// Extract the problematic token from error message
fn extract_token_from_error_message(error_msg: &str) -> Option<String> {
// Parse messages like "Expected string or identifier, got: {"
error_msg
.split("got: ")
.nth(1)
.map(|got_part| got_part.trim().to_string())
}
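// e.g. "Expected string or identifier, got: {" yields Some("{".to_string());
// a message without a "got: " marker yields None.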
/// Generate helpful suggestions based on error message
fn generate_suggestion_for_error(error_msg: &str) -> Option<String> {
if error_msg.contains("Expected string or identifier") {
Some(
"Check if you're missing quotes around a string value or have an unexpected character"
.to_string(),
)
} else if error_msg.contains("Expected") && error_msg.contains("got:") {
Some("Check syntax and ensure proper nftables structure".to_string())
} else {
None
}
}
/// Convert TextRange to line/column position
fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize) {
let start_offset: usize = range.start().into();
let lines: Vec<&str> = source.lines().collect();
let mut current_offset = 0;
for (line_idx, line) in lines.iter().enumerate() {
let line_end = current_offset + line.len();
if start_offset <= line_end {
let column = start_offset - current_offset;
return (line_idx + 1, column + 1); // 1-based indexing
}
current_offset = line_end + 1; // +1 for newline
}
(1, 1) // fallback
}
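// Caveat (editor's note): the `+ 1` assumes "\n" line endings. With "\r\n"
// sources, `lines()` strips the '\r' that this offset math never counts, so
// reported columns drift right by one for every preceding line.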
fn process_parse_command(file: String, tree: bool, verbose: bool, debug: bool) -> Result<()> {
let source =
fs::read_to_string(&file).with_context(|| format!("Failed to read file: {}", file))?;
// Tokenize
let mut lexer = NftablesLexer::new(&source);
let tokens = lexer
.tokenize()
.map_err(|e| FormatterError::ParseError(format!("Tokenization failed: {}", e)))?;
if debug {
eprintln!("=== TOKENS ===");
for (i, token) in tokens.iter().enumerate() {
eprintln!(
"{:3}: {:?} @ {:?} = '{}'",
i, token.kind, token.range, token.text
);
}
eprintln!();
}
// Build CST
let cst_tree = CstBuilder::build_tree(&tokens);
// Validate CST
match CstBuilder::validate_tree(&cst_tree) {
Ok(()) => {
if debug {
eprintln!("CST validation passed");
eprintln!();
}
}
Err(e) => {
eprintln!("Warning: CST validation error: {}", e);
eprintln!();
}
}
// Display CST
let cst_display = CstBuilder::display_tree(&cst_tree, tree, verbose);
println!("{}", cst_display);
Ok(())
}
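// Illustrative invocations (subcommand and flag names as clap derives them
// from the Commands enum above; file paths are placeholders):
//   nff parse firewall.nft --tree --verbose
//   nff lint firewall.nft --json
//   nff format firewall.nft --check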
 fn main() -> Result<()> {
     let args = Args::parse();
-    let result = match &args.command {
-        Commands::Format {
-            file,
-            indent,
-            stdout,
-            optimize,
-            spaces,
-            check,
-        } => process_format_command(
-            file.clone(),
-            *indent,
-            *stdout,
-            *optimize,
-            *spaces,
-            *check,
-            args.debug,
-        ),
-        Commands::Lint {
-            file,
-            json,
-            style_warnings,
-            best_practices,
-            performance_hints,
-            security_warnings,
-            modules,
-        } => process_lint_command(
-            file.clone(),
-            *json,
-            *style_warnings,
-            *best_practices,
-            *performance_hints,
-            *security_warnings,
-            modules.clone(),
-            args.debug,
-        ),
-        Commands::Parse {
-            file,
-            tree,
-            verbose,
-        } => process_parse_command(file.clone(), *tree, *verbose, args.debug),
-    };
-    if let Err(e) = result {
-        // Check if we're in lint mode with JSON output for error formatting
-        let use_json = match &args.command {
-            Commands::Lint { json, .. } => *json,
-            _ => false,
-        };
-        if use_json {
-            // Output error in JSON format when --json flag is used in lint mode
-            let error_json = format!(r#"{{"error": "{}"}}"#, e);
-            println!("{}", error_json);
-        } else {
-            eprintln!("Error: {}", e);
-            // Print the error chain
-            let mut current = e.source();
-            while let Some(cause) = current {
-                eprintln!(" Caused by: {}", cause);
-                current = cause.source();
-            }
-        }
+    if let Err(e) = process_nftables_config(args) {
+        eprintln!("Error: {}", e);
+        // Print the error chain
+        let mut current = e.source();
+        while let Some(cause) = current {
+            eprintln!(" Caused by: {}", cause);
+            current = cause.source();
+        }
         std::process::exit(1);

View file

@@ -311,8 +311,9 @@ impl Parser {
                     self.advance(); // consume 'policy'
                     let policy = self.parse_policy()?;
                     chain = chain.with_policy(policy);
-                    self.consume(TokenKind::Semicolon, "Expected ';' after policy")?;
                 }
+                self.consume(TokenKind::Semicolon, "Expected ';' after policy")?;
             }
             Some(TokenKind::CommentLine(_)) => {
                 self.advance();
@@ -464,13 +465,7 @@ impl Parser {
     fn parse_comparison_expression(&mut self) -> Result<Expression> {
         let mut expr = self.parse_range_expression()?;
-        // Check for operators
         while let Some(token) = self.peek() {
-            if matches!(token.kind, TokenKind::Newline) {
-                self.advance();
-                continue;
-            }
-            // Check for comparison operators
             let operator = match &token.kind {
                 TokenKind::Eq => BinaryOperator::Eq,
                 TokenKind::Ne => BinaryOperator::Ne,
@@ -478,49 +473,7 @@ impl Parser {
                 TokenKind::Le => BinaryOperator::Le,
                 TokenKind::Gt => BinaryOperator::Gt,
                 TokenKind::Ge => BinaryOperator::Ge,
-                _ => {
-                    // Check for vmap after an expression
-                    if matches!(&token.kind, TokenKind::Vmap) {
-                        self.advance(); // consume 'vmap'
-                        // Parse the map contents
-                        self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;
-                        let mut map = Vec::new();
-                        while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
-                            // Skip commas and newlines
-                            if self.current_token_is(&TokenKind::Comma)
-                                || self.current_token_is(&TokenKind::Newline)
-                            {
-                                self.advance();
-                                continue;
-                            }
-                            // Parse key
-                            let key = self.parse_expression()?;
-                            // Parse colon separator
-                            self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;
-                            // Parse value
-                            let value = self.parse_expression()?;
-                            // Add the key-value pair to the map
-                            map.push((key, value));
-                        }
-                        self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
-                        // Return a vmap expression with the previous expression as the mapping target
-                        expr = Expression::Vmap {
-                            expr: Some(Box::new(expr)),
-                            map,
-                        };
-                        continue; // allow the outer `while` to detect ==, != … afterwards
-                    }
-                    break;
-                }
+                _ => break,
             };
             self.advance(); // consume operator
@@ -800,43 +753,6 @@ impl Parser {
                 let addr = self.advance().unwrap().text.clone();
                 Ok(Expression::MacAddress(addr))
             }
-            Some(TokenKind::Vmap) => {
-                self.advance(); // consume 'vmap'
-                // Parse the map contents
-                self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;
-                let mut map = Vec::new();
-                while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
-                    // Skip commas and newlines
-                    if self.current_token_is(&TokenKind::Comma)
-                        || self.current_token_is(&TokenKind::Newline)
-                    {
-                        self.advance();
-                        continue;
-                    }
-                    // Parse key
-                    let key = self.parse_expression()?;
-                    // Parse colon separator
-                    self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;
-                    // Parse value
-                    let value = self.parse_expression()?;
-                    // Add the key-value pair to the map
-                    map.push((key, value));
-                }
-                self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
-                // No expression available at parse time, will be filled by post-processing if needed
-                let expr = None;
-                Ok(Expression::Vmap { expr, map })
-            }
             Some(TokenKind::LeftBrace) => {
                 self.advance(); // consume '{'
                 let mut elements = Vec::new();
@@ -858,23 +774,6 @@ impl Parser {
                 self.consume(TokenKind::RightBrace, "Expected '}' to close set")?;
                 Ok(Expression::Set(elements))
             }
-            Some(TokenKind::Accept) => {
-                self.advance();
-                Ok(Expression::Identifier("accept".to_string()))
-            }
-            Some(TokenKind::Drop) => {
-                self.advance();
-                Ok(Expression::Identifier("drop".to_string()))
-            }
-            Some(TokenKind::Reject) => {
-                self.advance();
-                Ok(Expression::Identifier("reject".to_string()))
-            }
-            Some(TokenKind::Protocol) => {
-                self.advance(); // consume 'protocol'
-                let protocol = self.parse_identifier_or_keyword()?;
-                Ok(Expression::Protocol(protocol))
-            }
             _ => Err(ParseError::InvalidExpression {
                 message: format!(
                     "Unexpected token in expression: {}",

View file

@@ -175,11 +175,10 @@ impl NftablesFormatter {
         // Add policy on the same line if present
         if let Some(policy) = &chain.policy {
             write!(output, " policy {}", policy).unwrap();
-            output.push_str(";\n");
-        } else {
-            output.push_str("\n");
         }
+        output.push_str(";\n");
         if !chain.rules.is_empty() && !self.config.optimize {
             output.push('\n');
         }
@@ -293,23 +292,6 @@ impl NftablesFormatter {
                 output.push('-');
                 self.format_expression(output, end);
             }
-            Expression::Vmap { expr, map } => {
-                if let Some(expr) = expr {
-                    self.format_expression(output, expr);
-                    output.push(' ');
-                }
-                output.push_str("vmap { ");
-                for (i, (key, value)) in map.iter().enumerate() {
-                    if i > 0 {
-                        output.push_str(", ");
-                    }
-                    self.format_expression(output, key);
-                    output.push_str(" : ");
-                    self.format_expression(output, value);
-                }
-                output.push_str(" }");
-            }
         }
     }
 }