WIP: allow using nff as a diagnostics source #1
12 changed files with 3500 additions and 293 deletions
27  .config/nextest.toml  Normal file
@@ -0,0 +1,27 @@
[test-groups]
nff = { max-threads = 1 }

[profile.default]

# "retries" defines the number of times a test should be retried. If set to a
# non-zero value, tests that succeed on a subsequent attempt will be marked as
# flaky. Can be overridden through the `--retries` option.
retries = 2

# Display all of: fail, retry, slow.
# See https://nexte.st/book/other-options.html?highlight=failure-output#--status-level-and---final-status-level
status-level = "skip"

# Treat a test that takes longer than this period as slow, and print a message.
# Given a non-zero positive integer, terminate the test after that many periods
# have passed.
slow-timeout = { period = "30s", terminate-after = 4 }

# * "immediate-final": output failures as soon as they happen and again at the
#   end of the test run
failure-output = "immediate-final"

# Do not cancel the test run on the first failure.
fail-fast = false

test-threads = 2
149  Cargo.lock  generated
@@ -2,6 +2,15 @@
# It is not intended for manual editing.
version = 4

[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
 "memchr",
]

[[package]]
name = "anstream"
version = "0.6.13"

@@ -167,6 +176,12 @@ dependencies = [
 "byteorder",
]

[[package]]
name = "glob"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"

[[package]]
name = "hashbrown"
version = "0.15.3"

@@ -189,6 +204,12 @@ dependencies = [
 "hashbrown",
]

[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"

[[package]]
name = "lazy_static"
version = "1.5.0"

@@ -245,6 +266,12 @@ dependencies = [
 "logos-codegen",
]

[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"

[[package]]
name = "nff"
version = "0.1.0"

@@ -252,11 +279,37 @@ dependencies = [
 "anyhow",
 "clap",
 "cstree",
 "glob",
 "logos",
 "num_enum",
 "regex",
 "serde",
 "serde_json",
 "text-size",
 "thiserror",
]

[[package]]
name = "num_enum"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
dependencies = [
 "num_enum_derive",
]

[[package]]
name = "num_enum_derive"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
dependencies = [
 "proc-macro-crate",
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "parking_lot"
version = "0.12.3"

@@ -280,6 +333,15 @@ dependencies = [
 "windows-targets",
]

[[package]]
name = "proc-macro-crate"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
dependencies = [
 "toml_edit",
]

[[package]]
name = "proc-macro2"
version = "1.0.95"

@@ -307,6 +369,29 @@ dependencies = [
 "bitflags",
]

[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
 "aho-corasick",
 "memchr",
 "regex-automata",
 "regex-syntax",
]

[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
 "aho-corasick",
 "memchr",
 "regex-syntax",
]

[[package]]
name = "regex-syntax"
version = "0.8.5"

@@ -322,6 +407,12 @@ dependencies = [
 "semver",
]

[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"

[[package]]
name = "scopeguard"
version = "1.2.0"

@@ -334,6 +425,38 @@ version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"

[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
 "itoa",
 "memchr",
 "ryu",
 "serde",
]

[[package]]
name = "smallvec"
version = "1.15.0"

@@ -395,6 +518,23 @@ dependencies = [
 "syn",
]

[[package]]
name = "toml_datetime"
version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"

[[package]]
name = "toml_edit"
version = "0.22.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
dependencies = [
 "indexmap",
 "toml_datetime",
 "winnow",
]

[[package]]
name = "triomphe"
version = "0.1.14"

@@ -481,3 +621,12 @@ name = "windows_x86_64_msvc"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"

[[package]]
name = "winnow"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec"
dependencies = [
 "memchr",
]

Cargo.toml
@@ -12,3 +12,8 @@ thiserror = "2.0"
logos = "0.15"
cstree = "0.12"
text-size = "1.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
regex = "1.11"
glob = "0.3"
num_enum = "0.7"
451  README.md
@@ -1,27 +1,33 @@
# nff

This is a high performance configuration parser written in Rust. The goal is
to receive possibly jumbled up nftables rule files, and output ✨ pretty ✨
human readable output in return. The main emphasis is on the syntax-aware
formatting with comprehensive grammar support. It is a future goal to allow
editors to hook into nff in order to format your rulesets directly from your
editor, or as a diagnostics source.
This is a high performance, low overhead configuration parser for nftables,
written in Rust. Syntax-aware parsing allows nff to provide a complete formatter
_and_ a linter for nftables. With the formatter, the goal is to receive possibly
jumbled up nftables rule files, and output ✨ pretty ✨ human readable output in
return. The linter, on the other hand, points out possible syntax, performance
or stylistic errors.

The main emphasis, however, is on the syntax-aware formatting with
comprehensive grammar support.

## Features

nff is in its early stages of development. While _most_ of the syntax is
supported, I cannot guarantee that _everything_ is supported just yet.
> [!NOTE]
> nff is in its early stages of development. While _most_ of the syntax is
> supported, I cannot guarantee that _everything_ is supported just yet.

### Core Functionality

Basic functionality of nff that most users will be interested in:

- **Syntax-aware formatting** - Deep understanding of nftables grammar with
  semantic preservation
- **Multi-family support** - Handles `inet`, `ip`, `ip6`, `arp`, `bridge`, and
  `netdev` table families
- **Flexible indentation** - Configurable tabs/spaces with custom depth
- **CIDR notation** - Proper handling of network addresses (`192.168.1.0/24`)
- **Chain properties** - Hooks, priorities (including negative), policies,
  device bindings

### Advanced Features

@@ -30,26 +36,87 @@ supported, I cannot guarantee that _everything_ is supported just yet.
- **Validation** - Syntax checking with precise error locations
- **Optimization** - Configurable empty line reduction and whitespace control

### Diagnostics & Analysis

- **Comprehensive diagnostics** - Syntax, semantic, style, and best practice
  analysis
- **Modular analysis** - Run specific diagnostic modules (`lexical`, `syntax`,
  `style`, `semantic`)
- **LSP-compatible output** - JSON format for editor integration
- **Human-readable reports** - Detailed error messages with context and
  location information
- **Configurable severity** - Control which diagnostic categories to
  enable/disable

## Usage

### Formatting

```bash
# Basic formatting
nff -f /etc/nftables.conf
# Format a specific file (in place)
nff format /etc/nftables.conf

# Format all .nft files in current directory (in place)
nff format

# Custom indentation (4 spaces)
nff -f config.nft --indent spaces --spaces 4
nff format config.nft --indent spaces --spaces 4

# Optimize formatting (reduce empty lines)
nff -f config.nft --optimize
nff format config.nft --optimize

# Output to file
nff -f config.nft -o formatted.nft
# Output to stdout instead of modifying files
nff format config.nft --stdout

# Syntax validation only
nff -f config.nft --check
nff format config.nft --check

# Debug output for development (or debugging)
nff -f config.nft --debug
nff format config.nft --debug
```

### Linting and Diagnostics

```bash
# Run comprehensive diagnostics on a file
nff lint /etc/nftables.conf

# Lint all .nft files in current directory
nff lint

# JSON output for editor integration
nff lint config.nft --json

# Run specific diagnostic modules
nff lint config.nft --modules syntax,style

# Available modules: lexical, syntax, style, semantic
nff lint config.nft --modules semantic

# Configure diagnostic settings (note: flags are enabled by default)
nff lint config.nft --style-warnings=false --best-practices=false

# Debug output with diagnostics
nff lint config.nft --debug
```

### Parsing and CST Inspection

```bash
# Parse and display CST structure for debugging
nff parse /etc/nftables.conf

# Show tree structure with indentation
nff parse config.nft --tree

# Show detailed node information
nff parse config.nft --verbose

# Combined tree and verbose output
nff parse config.nft --tree --verbose

# Debug output with tokens and CST validation
nff parse config.nft --debug
```

## Architecture

@@ -69,11 +136,248 @@ graph TD
    AST --> Formatter
    Formatter --> Output
    CST --> Formatter

    Input --> Diagnostics[Diagnostic System]
    Diagnostics --> LexAnalyzer[Lexical Analyzer]
    Diagnostics --> SyntaxAnalyzer[Syntax Analyzer]
    Diagnostics --> StyleAnalyzer[Style Analyzer]
    Diagnostics --> SemanticAnalyzer[Semantic Analyzer]

    LexAnalyzer --> DiagOutput[JSON/Human Output]
    SyntaxAnalyzer --> DiagOutput
    StyleAnalyzer --> DiagOutput
    SemanticAnalyzer --> DiagOutput
```
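
In code terms, the graph corresponds roughly to the stages below. This is an
illustrative sketch only: every name in it is invented for the example, and the
real APIs live in `src/lexer.rs`, `src/parser.rs`, `src/cst.rs`, and
`src/diagnostic.rs`.

```rust
// Illustrative data flow only; all names here are stand-ins, not nff's API.
struct Token;
struct Ast;
struct Diagnostic;

fn tokenize(_source: &str) -> Vec<Token> {
    Vec::new() // Lexer: source text -> tokens
}

fn parse(_tokens: &[Token]) -> Ast {
    Ast // Parser: tokens -> AST (a CST is built alongside)
}

fn format_ast(_ast: &Ast) -> String {
    String::new() // Formatter: AST/CST -> formatted output (`nff format`)
}

fn diagnose(_source: &str) -> Vec<Diagnostic> {
    Vec::new() // Diagnostic system: runs the four analyzers (`nff lint`)
}

fn main() {
    let source = "table inet firewall { }";
    let formatted = format_ast(&parse(&tokenize(source)));
    let diagnostics = diagnose(source);
    println!("{} ({} diagnostics)", formatted, diagnostics.len());
}
```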

## Installation

The recommended way of installing nff is to use Nix.
The recommended way of installing nff is to use Nix. Add `nff` to your flake
inputs, and add the package to your `environment.systemPackages`. Alternatively,
on non-NixOS systems, it is possible to use `nix profile install` to install
nff.

### Editor Integration

> [!TIP]
> Your editor not here? Open an issue. I can only add support for editors I
> use, but pull requests documenting alternative editor setups are appreciated!

#### Neovim Setup

nff can be integrated into Neovim as a diagnostics source for nftables files.
Here are several setup approaches:

##### Option 1: Using none-ls

none-ls is the most common method of adding diagnostics sources in Neovim.
While I recommend using nvim-lint for its simplicity, the instructions below
document how to set up none-ls.

```lua
local null_ls = require("null-ls")

null_ls.setup({
  sources = {
    -- nftables diagnostics
    null_ls.builtins.diagnostics.nff.with({
      command = "nff",
      args = { "lint", "$FILENAME", "--json" },
      format = "json",
      check_exit_code = false,
      filetypes = { "nftables" },
    }),

    -- nftables formatting
    null_ls.builtins.formatting.nff.with({
      command = "nff",
      args = { "format", "$FILENAME", "--stdout" },
      filetypes = { "nftables" },
    }),
  },
})
```

##### Option 2: Using nvim-lint (recommended)

This is the recommended way of adding nff as a diagnostics source in Neovim:
simple, low overhead, and not as error-prone as none-ls.

```lua
-- ~/.config/nvim/lua/config/lint.lua
require('lint').linters.nff = {
  cmd = 'nff',
  stdin = false,
  args = { 'lint', '%s', '--json' },
  stream = 'stdout',
  ignore_exitcode = true,
  parser = function(output)
    local diagnostics = {}
    local ok, decoded = pcall(vim.fn.json_decode, output)

    if not ok or not decoded.diagnostics then
      return diagnostics
    end

    for _, diagnostic in ipairs(decoded.diagnostics) do
      table.insert(diagnostics, {
        lnum = diagnostic.range.start.line,
        col = diagnostic.range.start.character,
        severity = diagnostic.severity == "Error" and vim.diagnostic.severity.ERROR or vim.diagnostic.severity.WARN,
        message = diagnostic.message,
        source = "nff",
        code = diagnostic.code,
      })
    end

    return diagnostics
  end,
}

-- Set up linting for nftables files
vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
  pattern = "*.nft",
  callback = function()
    require("lint").try_lint("nff")
  end,
})
```

##### Option 3: Custom Lua Function

Alternatively, if you don't want to rely on plugins, consider a setup such as
this one:

```lua
-- ~/.config/nvim/lua/nff.lua
local M = {}

function M.lint_nftables()
  local filename = vim.fn.expand('%:p')
  if vim.bo.filetype ~= 'nftables' then
    return
  end

  local cmd = { 'nff', 'lint', filename, '--json' }

  vim.fn.jobstart(cmd, {
    stdout_buffered = true,
    on_stdout = function(_, data)
      if data then
        local output = table.concat(data, '\n')
        local ok, result = pcall(vim.fn.json_decode, output)

        if ok and result.diagnostics then
          local diagnostics = {}
          for _, diag in ipairs(result.diagnostics) do
            table.insert(diagnostics, {
              lnum = diag.range.start.line,
              col = diag.range.start.character,
              severity = diag.severity == "Error" and vim.diagnostic.severity.ERROR or vim.diagnostic.severity.WARN,
              message = diag.message,
              source = "nff",
            })
          end

          vim.diagnostic.set(vim.api.nvim_create_namespace('nff'), 0, diagnostics)
        end
      end
    end,
  })
end

-- Auto-run on save
vim.api.nvim_create_autocmd("BufWritePost", {
  pattern = "*.nft",
  callback = M.lint_nftables,
})

return M
```

## Diagnostic Categories

nff provides comprehensive analysis across multiple categories:

### Syntax Errors

- Parse errors with precise location information
- Missing tokens (semicolons, braces, etc.)
- Unexpected tokens
- Unterminated strings
- Invalid numbers

### Semantic Validation

- Unknown table families (`inet`, `ip`, `ip6`, etc.)
- Invalid chain types and hooks
- Incorrect priority values
- Missing chain policies
- Duplicate table/chain names
- Invalid CIDR notation (see the sketch after this list)
- Invalid port ranges
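
As an illustration of the kind of check the semantic analyzer performs, here is
a minimal sketch of CIDR validation, assuming IPv4 and using only the standard
library; the function name and error shape are invented for the example and are
not taken from nff's source.

```rust
use std::net::Ipv4Addr;

// Hypothetical semantic check: validate IPv4 CIDR notation like `192.168.1.0/24`.
fn check_cidr(text: &str) -> Result<(), String> {
    let (addr, prefix) = text
        .split_once('/')
        .ok_or_else(|| format!("'{}' is not CIDR notation", text))?;

    // The address part must be a valid IPv4 address.
    let _ip: Ipv4Addr = addr
        .parse()
        .map_err(|_| format!("invalid address '{}'", addr))?;

    // The prefix length must be 0..=32 for IPv4.
    let bits: u8 = prefix
        .parse()
        .map_err(|_| format!("invalid prefix '{}'", prefix))?;
    if bits > 32 {
        return Err(format!("prefix /{} out of range (max /32)", bits));
    }
    Ok(())
}

fn main() {
    assert!(check_cidr("192.168.1.0/24").is_ok());
    assert!(check_cidr("192.168.1.0/33").is_err()); // would surface as a semantic diagnostic
}
```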

### Style Warnings

- Missing shebang line
- Inconsistent indentation (mixed tabs/spaces)
- Trailing whitespace
- Lines exceeding maximum length (configurable)
- Excessive empty lines
- Preferred syntax alternatives

### Best Practices

- Chains without explicit policies
- Rules without actions
- Overly permissive rules
- Duplicate or conflicting rules
- Unused variables or sets
- Deprecated syntax usage
- Missing documentation
- Security risks

### Performance Hints

- Inefficient rule ordering
- Large sets without timeouts
- Missing counters where beneficial

## JSON Output Format

When using `--json`, nff outputs LSP-compatible diagnostics:

```json
{
  "diagnostics": [
    {
      "range": {
        "start": { "line": 5, "character": 10 },
        "end": { "line": 5, "character": 20 }
      },
      "severity": "Error",
      "code": "NFT001",
      "source": "nff",
      "message": "Expected ';' after policy",
      "related_information": [],
      "code_actions": [],
      "tags": []
    }
  ],
  "file_path": "config.nft",
  "source_text": "..."
}
```
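
Since the payload is plain JSON, Rust consumers can model it with serde and
serde_json. The sketch below is based only on the fields shown above; the
struct names are assumptions, not nff's internal types, and unknown fields
(such as `related_information`) are simply ignored by serde's defaults.

```rust
use serde::Deserialize;

// Minimal model of the sample payload above, not nff's internal types.
#[derive(Debug, Deserialize)]
struct Report {
    diagnostics: Vec<Diagnostic>,
    file_path: String,
}

#[derive(Debug, Deserialize)]
struct Diagnostic {
    range: Range,
    severity: String,
    code: String,
    source: String,
    message: String,
}

#[derive(Debug, Deserialize)]
struct Range {
    start: Position,
    end: Position,
}

#[derive(Debug, Deserialize)]
struct Position {
    line: u32,
    character: u32,
}

fn main() -> Result<(), serde_json::Error> {
    let raw = r#"{"diagnostics":[{"range":{"start":{"line":5,"character":10},
        "end":{"line":5,"character":20}},"severity":"Error","code":"NFT001",
        "source":"nff","message":"Expected ';' after policy"}],
        "file_path":"config.nft","source_text":"..."}"#;
    let report: Report = serde_json::from_str(raw)?;
    println!("{}: {} diagnostics", report.file_path, report.diagnostics.len());
    Ok(())
}
```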

### Diagnostic Codes

nff uses structured diagnostic codes for categorization:

- **NFT001-NFT099**: Syntax errors
- **NFT101-NFT199**: Semantic errors
- **NFT201-NFT299**: Style warnings
- **NFT301-NFT399**: Best practice recommendations
- **NFT401-NFT499**: Performance hints
- **NFT501-NFT599**: Formatting issues
- **NFT601-NFT699**: nftables-specific validations
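
Because these ranges are disjoint, a consumer can recover a diagnostic's
category from the numeric part of its code. An illustrative sketch follows; the
enum and function are invented for the example and are not part of nff's API.

```rust
// Illustrative only: recover a category from a code such as "NFT305".
#[derive(Debug, PartialEq)]
enum Category {
    Syntax,
    Semantic,
    Style,
    BestPractice,
    Performance,
    Formatting,
    NftablesSpecific,
}

fn category_of(code: &str) -> Option<Category> {
    let n: u32 = code.strip_prefix("NFT")?.parse().ok()?;
    Some(match n {
        1..=99 => Category::Syntax,
        101..=199 => Category::Semantic,
        201..=299 => Category::Style,
        301..=399 => Category::BestPractice,
        401..=499 => Category::Performance,
        501..=599 => Category::Formatting,
        601..=699 => Category::NftablesSpecific,
        _ => return None,
    })
}

fn main() {
    assert_eq!(category_of("NFT301"), Some(Category::BestPractice));
    assert_eq!(category_of("NFT999"), None);
}
```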

## Development

@@ -196,8 +500,95 @@ table inet protection {
}
```

## Diagnostics Examples

### Error Detection

Input file with issues:

```nftables
table inet firewall {
  chain input {
    type filter hook input priority 100
    tcp dport 22 accept
  }
}
```

Human-readable output:

```
Found 2 issues in config.nft:
config.nft:3:37: error: Expected ';' after policy [NFT001]
  1: table inet firewall {
  2:   chain input {
→ 3:     type filter hook input priority 100
  4:     tcp dport 22 accept
  5:   }

config.nft:3:1: warning: Filter chain should have an explicit policy [NFT301]
  1: table inet firewall {
  2:   chain input {
→ 3:     type filter hook input priority 100
  4:     tcp dport 22 accept
  5:   }
```

JSON output:

```json
{
  "diagnostics": [
    {
      "range": {
        "start": { "line": 2, "character": 37 },
        "end": { "line": 2, "character": 37 }
      },
      "severity": "Error",
      "code": "NFT001",
      "source": "nff",
      "message": "Expected ';' after policy"
    },
    {
      "range": {
        "start": { "line": 2, "character": 0 },
        "end": { "line": 2, "character": 37 }
      },
      "severity": "Warning",
      "code": "NFT301",
      "source": "nff",
      "message": "Filter chain should have an explicit policy"
    }
  ],
  "file_path": "config.nft",
  "source_text": "..."
}
```

### Style Analysis

Input with style issues:

```nftables
table inet test{chain input{type filter hook input priority 0;policy drop;tcp dport 22 accept;}}
```

Style warnings:

```
Found 3 issues in style.nft:
style.nft:1:1: warning: Consider adding a shebang line [NFT201]
style.nft:1:121: warning: Line too long (98 > 80 characters) [NFT205]
style.nft:1:16: warning: Missing space after '{' [NFT503]
```

## Contributing

### Building

Build with `cargo build` as usual. If you are using Nix, you will also want to
ensure that the Nix package builds as expected.

### Code Style

- Follow `cargo fmt` formatting

@@ -212,11 +603,6 @@ table inet protection {
- **Regression tests**: Known issue prevention
- **Performance tests**: Benchmark critical paths

### Building

Build with `cargo build` as usual. If you are using Nix, you will also want to
ensure that the Nix package builds as expected.

## Technical Notes

### CST Implementation

@@ -237,6 +623,17 @@ Below are the design goals of nff's architecture.
- **Memory efficiency**: Streaming token processing where possible
- **Grammar completeness**: Covers full nftables syntax specification

### Diagnostic Architecture

The diagnostic system uses a modular architecture with specialized analyzers:

- **Modular design**: Each analyzer focuses on specific concerns (lexical,
  syntax, style, semantic)
- **Configurable analysis**: Enable/disable specific diagnostic categories
- **LSP compatibility**: JSON output follows Language Server Protocol standards
- **Performance optimized**: Concurrent analysis when possible
- **Extensible**: Easy to add new diagnostic rules and categories (see the
  sketch after this list)
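
As an illustration of that extensibility, here is a hypothetical sketch of what
a pluggable analyzer interface could look like; nff's actual trait and type
names may differ, and the diagnostic code used is only a placeholder from the
style range.

```rust
// Hypothetical sketch of a pluggable analyzer interface.
struct Diagnostic {
    code: String,
    message: String,
}

trait Analyzer {
    /// Human-readable module name, e.g. "style" or "semantic".
    fn name(&self) -> &'static str;
    /// Inspect the source and emit diagnostics.
    fn analyze(&self, source: &str) -> Vec<Diagnostic>;
}

/// Example module: flag trailing whitespace (a style check).
struct TrailingWhitespace;

impl Analyzer for TrailingWhitespace {
    fn name(&self) -> &'static str {
        "style"
    }

    fn analyze(&self, source: &str) -> Vec<Diagnostic> {
        source
            .lines()
            .enumerate()
            .filter(|(_, l)| l.ends_with(' ') || l.ends_with('\t'))
            .map(|(i, _)| Diagnostic {
                code: "NFT203".into(), // placeholder code from the style range
                message: format!("trailing whitespace on line {}", i + 1),
            })
            .collect()
    }
}

fn main() {
    let analyzers: Vec<Box<dyn Analyzer>> = vec![Box::new(TrailingWhitespace)];
    for a in &analyzers {
        for d in a.analyze("table inet t { \n}") {
            println!("[{}] {}: {}", a.name(), d.code, d.message);
        }
    }
}
```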

## License

nff is licensed under [MPL v2.0](LICENSE). See license file for more details on
@@ -4,6 +4,8 @@
  rustfmt,
  clippy,
  cargo,
  cargo-machete,
  cargo-nextest,
  rustPlatform,
}:
mkShell {

@@ -13,6 +15,8 @@ mkShell {
    rustfmt
    clippy
    cargo
    cargo-machete
    cargo-nextest
  ];

  RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";

@@ -125,6 +125,12 @@ pub enum Expression {
    // Set expressions
    Set(Vec<Expression>),

    // Vmap expressions (value maps)
    Vmap {
        expr: Option<Box<Expression>>,
        map: Vec<(Expression, Expression)>,
    },

    // Range expressions
    Range {
        start: Box<Expression>,

590  src/cst.rs
@@ -4,250 +4,289 @@

use crate::lexer::{Token, TokenKind};
use cstree::{RawSyntaxKind, green::GreenNode, util::NodeOrToken};
use num_enum::{IntoPrimitive, TryFromPrimitive};
use std::fmt;
use thiserror::Error;

/// nftables syntax node types
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// Uses `TryFromPrimitive` for safe conversion from raw values with fallback to `Error`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TryFromPrimitive, IntoPrimitive,
)]
#[repr(u16)]
pub enum SyntaxKind {
    // Root and containers
    Root = 0,
    Table,
    Chain,
    Rule,
    Set,
    Map,
    Element,
    Table = 1,
    Chain = 2,
    Rule = 3,
    Set = 4,
    Map = 5,
    Element = 6,

    // Expressions
    Expression,
    BinaryExpr,
    UnaryExpr,
    CallExpr,
    SetExpr,
    RangeExpr,
    Expression = 7,
    BinaryExpr = 8,
    UnaryExpr = 9,
    CallExpr = 10,
    SetExpr = 11,
    RangeExpr = 12,

    // Statements
    Statement,
    IncludeStmt,
    DefineStmt,
    FlushStmt,
    AddStmt,
    DeleteStmt,
    Statement = 13,
    IncludeStmt = 14,
    DefineStmt = 15,
    FlushStmt = 16,
    AddStmt = 17,
    DeleteStmt = 18,

    // Literals and identifiers
    Identifier,
    StringLiteral,
    NumberLiteral,
    IpAddress,
    Ipv6Address,
    MacAddress,
    Identifier = 19,
    StringLiteral = 20,
    NumberLiteral = 21,
    IpAddress = 22,
    Ipv6Address = 23,
    MacAddress = 24,

    // Keywords
    TableKw,
    ChainKw,
    RuleKw,
    SetKw,
    MapKw,
    ElementKw,
    IncludeKw,
    DefineKw,
    FlushKw,
    AddKw,
    DeleteKw,
    InsertKw,
    ReplaceKw,
    TableKw = 25,
    ChainKw = 26,
    RuleKw = 27,
    SetKw = 28,
    MapKw = 29,
    ElementKw = 30,
    IncludeKw = 31,
    DefineKw = 32,
    FlushKw = 33,
    AddKw = 34,
    DeleteKw = 35,
    InsertKw = 36,
    ReplaceKw = 37,

    // Chain types and hooks
    FilterKw,
    NatKw,
    RouteKw,
    InputKw,
    OutputKw,
    ForwardKw,
    PreroutingKw,
    PostroutingKw,
    FilterKw = 38,
    NatKw = 39,
    RouteKw = 40,
    InputKw = 41,
    OutputKw = 42,
    ForwardKw = 43,
    PreroutingKw = 44,
    PostroutingKw = 45,

    // Protocols and families
    IpKw,
    Ip6Kw,
    InetKw,
    ArpKw,
    BridgeKw,
    NetdevKw,
    TcpKw,
    UdpKw,
    IcmpKw,
    Icmpv6Kw,
    IpKw = 46,
    Ip6Kw = 47,
    InetKw = 48,
    ArpKw = 49,
    BridgeKw = 50,
    NetdevKw = 51,
    TcpKw = 52,
    UdpKw = 53,
    IcmpKw = 54,
    Icmpv6Kw = 55,

    // Match keywords
    SportKw,
    DportKw,
    SaddrKw,
    DaddrKw,
    ProtocolKw,
    NexthdrKw,
    TypeKw,
    HookKw,
    PriorityKw,
    PolicyKw,
    IifnameKw,
    OifnameKw,
    CtKw,
    StateKw,
    SportKw = 56,
    DportKw = 57,
    SaddrKw = 58,
    DaddrKw = 59,
    ProtocolKw = 60,
    NexthdrKw = 61,
    TypeKw = 62,
    HookKw = 63,
    PriorityKw = 64,
    PolicyKw = 65,
    IifnameKw = 66,
    OifnameKw = 67,
    CtKw = 68,
    StateKw = 69,

    // Actions
    AcceptKw,
    DropKw,
    RejectKw,
    ReturnKw,
    JumpKw,
    GotoKw,
    ContinueKw,
    LogKw,
    CommentKw,
    AcceptKw = 70,
    DropKw = 71,
    RejectKw = 72,
    ReturnKw = 73,
    JumpKw = 74,
    GotoKw = 75,
    ContinueKw = 76,
    LogKw = 77,
    CommentKw = 78,

    // States
    EstablishedKw,
    RelatedKw,
    NewKw,
    InvalidKw,
    EstablishedKw = 79,
    RelatedKw = 80,
    NewKw = 81,
    InvalidKw = 82,

    // Operators
    EqOp,
    NeOp,
    LeOp,
    GeOp,
    LtOp,
    GtOp,
    EqOp = 83,
    NeOp = 84,
    LeOp = 85,
    GeOp = 86,
    LtOp = 87,
    GtOp = 88,

    // Punctuation
    LeftBrace,
    RightBrace,
    LeftParen,
    RightParen,
    LeftBracket,
    RightBracket,
    Comma,
    Semicolon,
    Colon,
    Assign,
    Dash,
    Slash,
    Dot,
    LeftBrace = 89,
    RightBrace = 90,
    LeftParen = 91,
    RightParen = 92,
    LeftBracket = 93,
    RightBracket = 94,
    Comma = 95,
    Semicolon = 96,
    Colon = 97,
    Assign = 98,
    Dash = 99,
    Slash = 100,
    Dot = 101,

    // Trivia
    Whitespace,
    Newline,
    Comment,
    Shebang,
    Whitespace = 102,
    Newline = 103,
    Comment = 104,
    Shebang = 105,

    // Error recovery
    Error,
    Error = 106,

    // Protocol keywords for nftables
    VmapKw = 107,
    NdRouterAdvertKw = 108,
    NdNeighborSolicitKw = 109,
    NdNeighborAdvertKw = 110,
    EchoRequestKw = 111,
    DestUnreachableKw = 112,
    RouterAdvertisementKw = 113,
    TimeExceededKw = 114,
    ParameterProblemKw = 115,
    PacketTooBigKw = 116,
}

impl From<TokenKind> for SyntaxKind {
    fn from(kind: TokenKind) -> Self {
        use TokenKind::*;
        match kind {
            TokenKind::Table => SyntaxKind::TableKw,
            TokenKind::Chain => SyntaxKind::ChainKw,
            TokenKind::Rule => SyntaxKind::RuleKw,
            TokenKind::Set => SyntaxKind::SetKw,
            TokenKind::Map => SyntaxKind::MapKw,
            TokenKind::Element => SyntaxKind::ElementKw,
            TokenKind::Include => SyntaxKind::IncludeKw,
            TokenKind::Define => SyntaxKind::DefineKw,
            TokenKind::Flush => SyntaxKind::FlushKw,
            TokenKind::Add => SyntaxKind::AddKw,
            TokenKind::Delete => SyntaxKind::DeleteKw,
            TokenKind::Insert => SyntaxKind::InsertKw,
            TokenKind::Replace => SyntaxKind::ReplaceKw,
            // Keywords -> Kw variants
            Table => SyntaxKind::TableKw,
            Chain => SyntaxKind::ChainKw,
            Rule => SyntaxKind::RuleKw,
            Set => SyntaxKind::SetKw,
            Map => SyntaxKind::MapKw,
            Element => SyntaxKind::ElementKw,
            Include => SyntaxKind::IncludeKw,
            Define => SyntaxKind::DefineKw,
            Flush => SyntaxKind::FlushKw,
            Add => SyntaxKind::AddKw,
            Delete => SyntaxKind::DeleteKw,
            Insert => SyntaxKind::InsertKw,
            Replace => SyntaxKind::ReplaceKw,

            TokenKind::Filter => SyntaxKind::FilterKw,
            TokenKind::Nat => SyntaxKind::NatKw,
            TokenKind::Route => SyntaxKind::RouteKw,
            // Chain types and hooks
            Filter => SyntaxKind::FilterKw,
            Nat => SyntaxKind::NatKw,
            Route => SyntaxKind::RouteKw,
            Input => SyntaxKind::InputKw,
            Output => SyntaxKind::OutputKw,
            Forward => SyntaxKind::ForwardKw,
            Prerouting => SyntaxKind::PreroutingKw,
            Postrouting => SyntaxKind::PostroutingKw,

            TokenKind::Input => SyntaxKind::InputKw,
            TokenKind::Output => SyntaxKind::OutputKw,
            TokenKind::Forward => SyntaxKind::ForwardKw,
            TokenKind::Prerouting => SyntaxKind::PreroutingKw,
            TokenKind::Postrouting => SyntaxKind::PostroutingKw,
            // Protocols and families
            Ip => SyntaxKind::IpKw,
            Ip6 => SyntaxKind::Ip6Kw,
            Inet => SyntaxKind::InetKw,
            Arp => SyntaxKind::ArpKw,
            Bridge => SyntaxKind::BridgeKw,
            Netdev => SyntaxKind::NetdevKw,
            Tcp => SyntaxKind::TcpKw,
            Udp => SyntaxKind::UdpKw,
            Icmp => SyntaxKind::IcmpKw,
            Icmpv6 => SyntaxKind::Icmpv6Kw,

            TokenKind::Ip => SyntaxKind::IpKw,
            TokenKind::Ip6 => SyntaxKind::Ip6Kw,
            TokenKind::Inet => SyntaxKind::InetKw,
            TokenKind::Arp => SyntaxKind::ArpKw,
            TokenKind::Bridge => SyntaxKind::BridgeKw,
            TokenKind::Netdev => SyntaxKind::NetdevKw,
            TokenKind::Tcp => SyntaxKind::TcpKw,
            TokenKind::Udp => SyntaxKind::UdpKw,
            TokenKind::Icmp => SyntaxKind::IcmpKw,
            TokenKind::Icmpv6 => SyntaxKind::Icmpv6Kw,
            // Match keywords
            Sport => SyntaxKind::SportKw,
            Dport => SyntaxKind::DportKw,
            Saddr => SyntaxKind::SaddrKw,
            Daddr => SyntaxKind::DaddrKw,
            Protocol => SyntaxKind::ProtocolKw,
            Nexthdr => SyntaxKind::NexthdrKw,
            Type => SyntaxKind::TypeKw,
            Hook => SyntaxKind::HookKw,
            Priority => SyntaxKind::PriorityKw,
            Policy => SyntaxKind::PolicyKw,
            Iifname => SyntaxKind::IifnameKw,
            Oifname => SyntaxKind::OifnameKw,
            Ct => SyntaxKind::CtKw,
            State => SyntaxKind::StateKw,

            TokenKind::Sport => SyntaxKind::SportKw,
            TokenKind::Dport => SyntaxKind::DportKw,
            TokenKind::Saddr => SyntaxKind::SaddrKw,
            TokenKind::Daddr => SyntaxKind::DaddrKw,
            TokenKind::Protocol => SyntaxKind::ProtocolKw,
            TokenKind::Nexthdr => SyntaxKind::NexthdrKw,
            TokenKind::Type => SyntaxKind::TypeKw,
            TokenKind::Hook => SyntaxKind::HookKw,
            TokenKind::Priority => SyntaxKind::PriorityKw,
            TokenKind::Policy => SyntaxKind::PolicyKw,
            TokenKind::Iifname => SyntaxKind::IifnameKw,
            TokenKind::Oifname => SyntaxKind::OifnameKw,
            TokenKind::Ct => SyntaxKind::CtKw,
            TokenKind::State => SyntaxKind::StateKw,
            // Actions
            Accept => SyntaxKind::AcceptKw,
            Drop => SyntaxKind::DropKw,
            Reject => SyntaxKind::RejectKw,
            Return => SyntaxKind::ReturnKw,
            Jump => SyntaxKind::JumpKw,
            Goto => SyntaxKind::GotoKw,
            Continue => SyntaxKind::ContinueKw,
            Log => SyntaxKind::LogKw,
            Comment => SyntaxKind::CommentKw,

            TokenKind::Accept => SyntaxKind::AcceptKw,
            TokenKind::Drop => SyntaxKind::DropKw,
            TokenKind::Reject => SyntaxKind::RejectKw,
            TokenKind::Return => SyntaxKind::ReturnKw,
            TokenKind::Jump => SyntaxKind::JumpKw,
            TokenKind::Goto => SyntaxKind::GotoKw,
            TokenKind::Continue => SyntaxKind::ContinueKw,
            TokenKind::Log => SyntaxKind::LogKw,
            TokenKind::Comment => SyntaxKind::CommentKw,
            // States
            Established => SyntaxKind::EstablishedKw,
            Related => SyntaxKind::RelatedKw,
            New => SyntaxKind::NewKw,
            Invalid => SyntaxKind::InvalidKw,

            TokenKind::Established => SyntaxKind::EstablishedKw,
            TokenKind::Related => SyntaxKind::RelatedKw,
            TokenKind::New => SyntaxKind::NewKw,
            TokenKind::Invalid => SyntaxKind::InvalidKw,
            // Protocol keywords for ICMP/ICMPv6
            Vmap => SyntaxKind::VmapKw,
            NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
            NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
            NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
            EchoRequest => SyntaxKind::EchoRequestKw,
            DestUnreachable => SyntaxKind::DestUnreachableKw,
            RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
            TimeExceeded => SyntaxKind::TimeExceededKw,
            ParameterProblem => SyntaxKind::ParameterProblemKw,
            PacketTooBig => SyntaxKind::PacketTooBigKw,

            TokenKind::Eq => SyntaxKind::EqOp,
            TokenKind::Ne => SyntaxKind::NeOp,
            TokenKind::Le => SyntaxKind::LeOp,
            TokenKind::Ge => SyntaxKind::GeOp,
            TokenKind::Lt => SyntaxKind::LtOp,
            TokenKind::Gt => SyntaxKind::GtOp,
            // Operators - direct mapping
            Eq => SyntaxKind::EqOp,
            Ne => SyntaxKind::NeOp,
            Le => SyntaxKind::LeOp,
            Ge => SyntaxKind::GeOp,
            Lt => SyntaxKind::LtOp,
            Gt => SyntaxKind::GtOp,

            TokenKind::LeftBrace => SyntaxKind::LeftBrace,
            TokenKind::RightBrace => SyntaxKind::RightBrace,
            TokenKind::LeftParen => SyntaxKind::LeftParen,
            TokenKind::RightParen => SyntaxKind::RightParen,
            TokenKind::LeftBracket => SyntaxKind::LeftBracket,
            TokenKind::RightBracket => SyntaxKind::RightBracket,
            TokenKind::Comma => SyntaxKind::Comma,
            TokenKind::Semicolon => SyntaxKind::Semicolon,
            TokenKind::Colon => SyntaxKind::Colon,
            TokenKind::Assign => SyntaxKind::Assign,
            TokenKind::Dash => SyntaxKind::Dash,
            TokenKind::Slash => SyntaxKind::Slash,
            TokenKind::Dot => SyntaxKind::Dot,
            // Punctuation - direct mapping
            LeftBrace => SyntaxKind::LeftBrace,
            RightBrace => SyntaxKind::RightBrace,
            LeftParen => SyntaxKind::LeftParen,
            RightParen => SyntaxKind::RightParen,
            LeftBracket => SyntaxKind::LeftBracket,
            RightBracket => SyntaxKind::RightBracket,
            Comma => SyntaxKind::Comma,
            Semicolon => SyntaxKind::Semicolon,
            Colon => SyntaxKind::Colon,
            Assign => SyntaxKind::Assign,
            Dash => SyntaxKind::Dash,
            Slash => SyntaxKind::Slash,
            Dot => SyntaxKind::Dot,

            TokenKind::StringLiteral(_) => SyntaxKind::StringLiteral,
            TokenKind::NumberLiteral(_) => SyntaxKind::NumberLiteral,
            TokenKind::IpAddress(_) => SyntaxKind::IpAddress,
            TokenKind::Ipv6Address(_) => SyntaxKind::Ipv6Address,
            TokenKind::MacAddress(_) => SyntaxKind::MacAddress,
            TokenKind::Identifier(_) => SyntaxKind::Identifier,
            // Literals - map data-carrying variants to their types
            StringLiteral(_) => SyntaxKind::StringLiteral,
            NumberLiteral(_) => SyntaxKind::NumberLiteral,
            IpAddress(_) => SyntaxKind::IpAddress,
            Ipv6Address(_) => SyntaxKind::Ipv6Address,
            MacAddress(_) => SyntaxKind::MacAddress,
            Identifier(_) => SyntaxKind::Identifier,

            TokenKind::Newline => SyntaxKind::Newline,
            TokenKind::CommentLine(_) => SyntaxKind::Comment,
            TokenKind::Shebang(_) => SyntaxKind::Shebang,
            // Special tokens
            Newline => SyntaxKind::Newline,
            CommentLine(_) => SyntaxKind::Comment,
            Shebang(_) => SyntaxKind::Shebang,

            TokenKind::Error => SyntaxKind::Error,
            // Error fallback
            Error => SyntaxKind::Error,
        }
    }
}

@@ -301,7 +340,7 @@ impl SyntaxKind {
    }

    pub fn from_raw(raw: RawSyntaxKind) -> Self {
        unsafe { std::mem::transmute(raw.0 as u16) }
        Self::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)
    }
}
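
This hunk replaces an unsafe `transmute` with num_enum's checked conversion.
For reference, a minimal standalone sketch of the same pattern; `Kind` is a
stand-in for nff's much larger `SyntaxKind`, not the real enum.

```rust
use num_enum::{IntoPrimitive, TryFromPrimitive};

// Standalone sketch of the num_enum pattern the hunk above adopts.
#[derive(Debug, Clone, Copy, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u16)]
enum Kind {
    Root = 0,
    Table = 1,
    Error = 2,
}

fn from_raw(raw: u16) -> Kind {
    // Checked conversion: out-of-range values fall back to `Error`
    // instead of invoking undefined behavior via `transmute`.
    Kind::try_from(raw).unwrap_or(Kind::Error)
}

fn main() {
    assert_eq!(from_raw(1), Kind::Table);
    assert_eq!(from_raw(999), Kind::Error);
}
```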

@@ -319,7 +358,7 @@ pub enum CstError {
/// Result type for CST operations
pub type CstResult<T> = Result<T, CstError>;

/// Basic CST builder
/// CST builder for nftables syntax
pub struct CstBuilder;

impl CstBuilder {

@@ -342,6 +381,79 @@ impl CstBuilder {
        Self::validate_tree(&tree)?;
        Ok(tree)
    }

    /// Display CST in a human-readable format for debugging
    pub fn display_tree(node: &GreenNode, tree_format: bool, verbose: bool) -> String {
        let mut output = String::new();
        Self::display_node_recursive(node, 0, tree_format, verbose, &mut output);
        output
    }

    fn display_node_recursive(
        node: &GreenNode,
        indent_level: usize,
        tree_format: bool,
        verbose: bool,
        output: &mut String,
    ) {
        let kind = SyntaxKind::from_raw(node.kind());
        let indent = if tree_format {
            "  ".repeat(indent_level)
        } else {
            String::new()
        };

        if tree_format {
            output.push_str(&format!("{}├─ {}", indent, kind));
        } else {
            output.push_str(&format!("{}{}", indent, kind));
        }

        if verbose {
            output.push_str(&format!(
                " (kind: {:?}, width: {:?})",
                node.kind(),
                node.text_len()
            ));
        }

        output.push('\n');

        // Display children
        for child in node.children() {
            match child {
                NodeOrToken::Node(child_node) => {
                    Self::display_node_recursive(
                        child_node,
                        indent_level + 1,
                        tree_format,
                        verbose,
                        output,
                    );
                }
                NodeOrToken::Token(token) => {
                    let token_indent = if tree_format {
                        "  ".repeat(indent_level + 1)
                    } else {
                        String::new()
                    };

                    let token_kind = SyntaxKind::from_raw(token.kind());
                    if tree_format {
                        output.push_str(&format!("{}├─ {}", token_indent, token_kind));
                    } else {
                        output.push_str(&format!("{}{}", token_indent, token_kind));
                    }

                    if verbose {
                        output.push_str(&format!(" (width: {:?})", token.text_len()));
                    }

                    output.push('\n');
                }
            }
        }
    }
}

/// Internal tree builder that constructs CST according to nftables grammar

@@ -1208,7 +1320,7 @@ mod tests {
        let mut lexer = NftablesLexer::new(source);
        let tokens = lexer.tokenize().expect("Tokenization should succeed");

        // CST is now implemented - test that it works
        // Test CST construction with basic table syntax
        let green_tree = CstBuilder::build_tree(&tokens);

        // Verify the tree was created successfully

@@ -1225,4 +1337,88 @@ mod tests {
        let cst_result = CstBuilder::parse_to_cst(&tokens);
        assert!(cst_result.is_ok());
    }

    #[test]
    fn test_num_enum_improvements() {
        // Test that from_raw uses num_enum for conversion;
        // invalid values fall back to the Error variant

        // Test valid conversions
        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(0)), SyntaxKind::Root);
        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1)), SyntaxKind::Table);
        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(25)), SyntaxKind::TableKw);
        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(106)), SyntaxKind::Error);
        assert_eq!(
            SyntaxKind::from_raw(RawSyntaxKind(116)),
            SyntaxKind::PacketTooBigKw
        );

        // Test invalid values automatically fall back to Error
        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(999)), SyntaxKind::Error);
        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1000)), SyntaxKind::Error);

        // Test bidirectional conversion
        for variant in [
            SyntaxKind::Root,
            SyntaxKind::Table,
            SyntaxKind::TableKw,
            SyntaxKind::Error,
            SyntaxKind::PacketTooBigKw,
        ] {
            let raw = variant.to_raw();
            let converted_back = SyntaxKind::from_raw(raw);
            assert_eq!(variant, converted_back);
        }
    }

    #[test]
    fn test_token_kind_conversion_improvements() {
        // Test that From<TokenKind> conversion is complete and correct
        use crate::lexer::TokenKind;

        // Test keyword mappings
        assert_eq!(SyntaxKind::from(TokenKind::Table), SyntaxKind::TableKw);
        assert_eq!(SyntaxKind::from(TokenKind::Chain), SyntaxKind::ChainKw);
        assert_eq!(SyntaxKind::from(TokenKind::Accept), SyntaxKind::AcceptKw);

        // Test operators
        assert_eq!(SyntaxKind::from(TokenKind::Eq), SyntaxKind::EqOp);
        assert_eq!(SyntaxKind::from(TokenKind::Lt), SyntaxKind::LtOp);

        // Test punctuation
        assert_eq!(
            SyntaxKind::from(TokenKind::LeftBrace),
            SyntaxKind::LeftBrace
        );
        assert_eq!(
            SyntaxKind::from(TokenKind::Semicolon),
            SyntaxKind::Semicolon
        );

        // Test literals (with data)
        assert_eq!(
            SyntaxKind::from(TokenKind::StringLiteral("test".to_string())),
            SyntaxKind::StringLiteral
        );
        assert_eq!(
            SyntaxKind::from(TokenKind::NumberLiteral(42)),
            SyntaxKind::NumberLiteral
        );
        assert_eq!(
            SyntaxKind::from(TokenKind::IpAddress("192.168.1.1".to_string())),
            SyntaxKind::IpAddress
        );
        assert_eq!(
            SyntaxKind::from(TokenKind::Identifier("test".to_string())),
            SyntaxKind::Identifier
        );

        // Test special tokens
        assert_eq!(SyntaxKind::from(TokenKind::Newline), SyntaxKind::Newline);
        assert_eq!(
            SyntaxKind::from(TokenKind::CommentLine("# comment".to_string())),
            SyntaxKind::Comment
        );
        assert_eq!(SyntaxKind::from(TokenKind::Error), SyntaxKind::Error);
    }
}
1622  src/diagnostic.rs  Normal file
File diff suppressed because it is too large
61  src/lexer.rs
@@ -10,8 +10,8 @@ pub enum LexError {
    InvalidToken { position: usize, text: String },
    #[error("Unterminated string literal starting at position {position}")]
    UnterminatedString { position: usize },
    #[error("Invalid numeric literal: {text}")]
    InvalidNumber { text: String },
    #[error("Invalid numeric literal at position {position}: {text}")]
    InvalidNumber { position: usize, text: String },
}

/// Result type for lexical analysis

@@ -129,6 +129,28 @@ pub enum TokenKind {
    #[token("new")]
    New,

    // Additional protocol keywords
    #[token("vmap")]
    Vmap,
    #[token("nd-router-advert")]
    NdRouterAdvert,
    #[token("nd-neighbor-solicit")]
    NdNeighborSolicit,
    #[token("nd-neighbor-advert")]
    NdNeighborAdvert,
    #[token("echo-request")]
    EchoRequest,
    #[token("destination-unreachable")]
    DestUnreachable,
    #[token("router-advertisement")]
    RouterAdvertisement,
    #[token("time-exceeded")]
    TimeExceeded,
    #[token("parameter-problem")]
    ParameterProblem,
    #[token("packet-too-big")]
    PacketTooBig,

    // Actions
    #[token("accept")]
    Accept,

@@ -334,6 +356,7 @@ impl<'a> NftablesLexer<'a> {
            .any(|c| !c.is_ascii_digit() && c != '.' && c != 'x' && c != 'X')
        {
            return Err(LexError::InvalidNumber {
                position: span.start,
                text: text.to_owned(),
            });
        } else {

@@ -426,4 +449,38 @@ mod tests {
            panic!("Expected InvalidToken error");
        }
    }

    #[test]
    fn test_invalid_number_with_position() {
        // Test that we can create a proper diagnostic with position information
        use crate::diagnostic::LexicalAnalyzer;

        // Create a source with the same invalid pattern at different positions
        let source = "123abc normal 123abc end";

        // Since normal tokenization splits "123abc" into "123" + "abc",
        // test the diagnostic creation directly with a mock error
        let error1 = LexError::InvalidNumber {
            position: 0,
            text: "123abc".to_string(),
        };
        let error2 = LexError::InvalidNumber {
            position: 14,
            text: "123abc".to_string(),
        };

        // Test that diagnostics are created with correct positions
        let diagnostic1 = LexicalAnalyzer::lex_error_to_diagnostic(&error1, source);
        let diagnostic2 = LexicalAnalyzer::lex_error_to_diagnostic(&error2, source);

        // First occurrence should be at position 0
        assert_eq!(diagnostic1.range.start.line, 0);
        assert_eq!(diagnostic1.range.start.character, 0);
        assert_eq!(diagnostic1.message, "Invalid number: '123abc'");

        // Second occurrence should be at position 14 (not 0)
        assert_eq!(diagnostic2.range.start.line, 0);
        assert_eq!(diagnostic2.range.start.character, 14);
        assert_eq!(diagnostic2.message, "Invalid number: '123abc'");
    }
}
749
src/main.rs
749
src/main.rs
|
@ -1,18 +1,21 @@
|
|||
mod ast;
|
||||
mod cst;
|
||||
mod diagnostic;
|
||||
mod lexer;
|
||||
mod parser;
|
||||
mod syntax;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::Parser;
|
||||
use clap::{Parser, Subcommand};
|
||||
use glob::glob;
|
||||
use std::fs;
|
||||
use std::io::{self, Write};
|
||||
use std::path::Path;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::cst::CstBuilder;
|
||||
use crate::lexer::NftablesLexer;
|
||||
use crate::diagnostic::{DiagnosticAnalyzer, DiagnosticConfig};
|
||||
use crate::lexer::{NftablesLexer, Token, TokenKind};
|
||||
use crate::parser::Parser as NftablesParser;
|
||||
use crate::syntax::{FormatConfig, IndentStyle, NftablesFormatter};
|
||||
|
||||
|
@ -24,51 +27,213 @@ enum FormatterError {
|
|||
InvalidFile(String),
|
||||
#[error("Parse error: {0}")]
|
||||
ParseError(String),
|
||||
#[error("Syntax error at line {line}, column {column}: {message}")]
|
||||
SyntaxError {
|
||||
line: usize,
|
||||
column: usize,
|
||||
message: String,
|
||||
suggestion: Option<String>,
|
||||
},
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] io::Error),
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[derive(Error, Debug)]
|
||||
enum LintError {
|
||||
#[error("Lint errors found in {file_count} file(s)")]
|
||||
DiagnosticErrors { file_count: usize },
|
||||
#[error("File discovery error: {0}")]
|
||||
FileDiscovery(#[from] anyhow::Error),
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug, Clone)]
|
||||
#[command(
|
||||
name = "nff",
|
||||
version = "0.1.0",
|
||||
about = "A high-quality nftables formatter and beautifier",
|
||||
long_about = "nff (nftables formatter) is a tool for formatting and beautifying nftables configuration files with proper indentation and structure."
|
||||
about = "A high-quality nftables formatter and linter",
|
||||
long_about = "nff (nftables formatter) is a tool for formatting and linting nftables configuration files with proper indentation and structure."
|
||||
)]
|
||||
struct Args {
|
||||
/// nftables config file (e.g: /etc/nftables.conf)
|
||||
#[arg(short, long, value_name = "FILE")]
|
||||
file: String,
|
||||
|
||||
/// Type of indentation
|
||||
#[arg(short, long, default_value = "tabs", value_parser = clap::value_parser!(IndentStyle))]
|
||||
indent: IndentStyle,
|
||||
|
||||
/// Output file (writes to stdout if not specified)
|
||||
#[arg(short, long, value_name = "FILE")]
|
||||
output: Option<String>,
|
||||
|
||||
/// Optimize output by removing excessive empty lines
|
||||
#[arg(long)]
|
||||
optimize: bool,
|
||||
|
||||
/// Number of spaces per indentation level (only used with --indent=spaces)
|
||||
#[arg(long, default_value = "2", value_name = "N")]
|
||||
spaces: usize,
|
||||
#[command(subcommand)]
|
||||
command: Commands,
|
||||
|
||||
/// Show debug information (tokens, AST, etc.)
|
||||
#[arg(long)]
|
||||
#[arg(long, global = true)]
|
||||
debug: bool,
|
||||
|
||||
/// Check syntax only, don't format
|
||||
#[arg(long)]
|
||||
check: bool,
|
||||
}
|
||||
|
||||
fn process_nftables_config(args: Args) -> Result<()> {
|
||||
let path = Path::new(&args.file);
|
||||
#[derive(Subcommand, Debug, Clone)]
|
||||
enum Commands {
|
||||
/// Format nftables configuration files
|
||||
Format {
|
||||
/// nftables config file (e.g: /etc/nftables.conf). If not provided, formats all .nft files in current directory
|
||||
#[arg(value_name = "FILE")]
|
||||
file: Option<String>,
|
||||
|
||||
/// Type of indentation
|
||||
#[arg(short, long, default_value = "tabs", value_parser = clap::value_parser!(IndentStyle))]
|
||||
indent: IndentStyle,
|
||||
|
||||
/// Print formatted output to stdout instead of modifying files in place
|
||||
#[arg(long)]
|
||||
stdout: bool,
|
||||
|
||||
/// Optimize output by removing excessive empty lines
|
||||
#[arg(long)]
|
||||
optimize: bool,
|
||||
|
||||
/// Number of spaces per indentation level (only used with --indent=spaces)
|
||||
#[arg(long, default_value = "2", value_name = "N")]
|
||||
spaces: usize,
|
||||
|
||||
/// Check syntax only, don't format
|
||||
#[arg(long)]
|
||||
check: bool,
|
||||
},
|
||||
/// Lint nftables configuration files and show diagnostics
|
||||
Lint {
|
||||
/// nftables config file (e.g: /etc/nftables.conf). If not provided, lints all .nft files in current directory
|
||||
#[arg(value_name = "FILE")]
|
||||
file: Option<String>,
|
||||
|
||||
/// Output diagnostics in JSON format (useful for tooling integration)
|
||||
#[arg(long)]
|
||||
json: bool,
|
||||
|
||||
/// Include style warnings in diagnostics
|
||||
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
|
||||
style_warnings: bool,
|
||||
|
||||
/// Include best practice recommendations in diagnostics
|
||||
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
|
||||
best_practices: bool,
|
||||
|
||||
/// Include performance hints in diagnostics
|
||||
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
|
||||
performance_hints: bool,
|
||||
|
||||
/// Include security warnings in diagnostics
|
||||
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
|
||||
security_warnings: bool,
|
||||
|
||||
/// Diagnostic modules to run (comma-separated: lexical,syntax,style,semantic)
|
||||
#[arg(long, value_delimiter = ',')]
|
||||
modules: Option<Vec<String>>,
|
||||
},
|
||||
/// Parse and display file in CST format for debugging
|
||||
Parse {
|
||||
/// nftables config file to parse
|
||||
#[arg(value_name = "FILE")]
|
||||
file: String,
|
||||
|
||||
/// Show tree structure with indentation
|
||||
#[arg(long)]
|
||||
tree: bool,
|
||||
|
||||
/// Show detailed node information
|
||||
#[arg(long)]
|
||||
verbose: bool,
|
||||
},
|
||||
}
|
||||
|
||||
fn discover_nftables_files() -> Result<Vec<String>> {
|
||||
let mut files = Vec::new();
|
||||
|
||||
// Common nftables file patterns
|
||||
let patterns = [
|
||||
"*.nft",
|
||||
"*.nftables",
|
||||
"/etc/nftables.conf",
|
||||
"/etc/nftables/*.nft",
|
||||
];
|
||||
|
||||
for pattern in &patterns {
|
||||
match glob(pattern) {
|
||||
Ok(paths) => {
|
||||
for entry in paths {
|
||||
match entry {
|
||||
Ok(path) => {
|
||||
if path.is_file() {
|
||||
if let Some(path_str) = path.to_str() {
|
||||
files.push(path_str.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => eprintln!("Warning: Error reading path: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// Only warn for non-current directory patterns
|
||||
if !pattern.starts_with("*.") {
|
||||
eprintln!("Warning: Failed to search pattern {}: {}", pattern, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if files.is_empty() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"No nftables files found. Please specify a file explicitly or ensure .nft/.nftables files exist in the current directory."
|
||||
));
|
||||
}
|
||||
|
||||
// Remove duplicates and sort
|
||||
files.sort();
|
||||
files.dedup();
|
||||
|
||||
Ok(files)
|
||||
}

fn process_format_command(
    file: Option<String>,
    indent: IndentStyle,
    stdout: bool,
    optimize: bool,
    spaces: usize,
    check: bool,
    debug: bool,
) -> Result<()> {
    let files = match file {
        Some(f) => vec![f],
        None => discover_nftables_files()?,
    };

    let is_multiple_files = files.len() > 1;
    for file_path in files {
        if let Err(e) = process_single_file_format(
            &file_path,
            indent,
            stdout,
            optimize,
            spaces,
            check,
            debug,
            is_multiple_files,
        ) {
            eprintln!("Error processing {}: {}", file_path, e);
            if !is_multiple_files {
                return Err(e);
            }
        }
    }

    Ok(())
}

fn process_single_file_format(
    file: &str,
    indent: IndentStyle,
    stdout: bool,
    optimize: bool,
    spaces: usize,
    check: bool,
    debug: bool,
    is_multiple_files: bool,
) -> Result<()> {
    let path = Path::new(&file);
    if !path.exists() {
        return Err(FormatterError::FileNotFound(file.to_string()).into());
    }

    if !path.is_file() {
@@ -76,12 +241,12 @@ fn process_nftables_config(args: Args) -> Result<()> {
    }

    // Read file contents
    let source =
        fs::read_to_string(file).with_context(|| format!("Failed to read file: {}", file))?;

    // Tokenize
    let mut lexer = NftablesLexer::new(&source);
    let tokens = if debug {
        // Use error-recovery tokenization for debug mode
        lexer.tokenize_with_errors()
    } else {
@@ -90,7 +255,7 @@ fn process_nftables_config(args: Args) -> Result<()> {
            .map_err(|e| FormatterError::ParseError(e.to_string()))?
    };

    if debug {
        eprintln!("=== TOKENS ===");
        for (i, token) in tokens.iter().enumerate() {
            eprintln!(
@@ -117,7 +282,7 @@ fn process_nftables_config(args: Args) -> Result<()> {
    }

    // Parse
    let ruleset = if debug {
        // Use error-recovery parsing for debug mode
        let (parsed_ruleset, errors) = NftablesParser::parse_with_errors(&source);
        if !errors.is_empty() {
@@ -127,64 +292,524 @@ fn process_nftables_config(args: Args) -> Result<()> {
            }
            eprintln!();
        }
        parsed_ruleset.unwrap_or_else(crate::ast::Ruleset::new)
    } else {
        let mut parser = NftablesParser::new(tokens.clone());
        parser
            .parse()
            .map_err(|e| convert_parse_error_to_formatter_error(&e, &source, &tokens))?
    };

    if debug {
        eprintln!("=== AST ===");
        eprintln!("{:#?}", ruleset);
        eprintln!();
    }

    if check {
        println!("Syntax check passed for: {}", file);
        return Ok(());
    }

    // Format
    let config = FormatConfig {
        indent_style: indent,
        spaces_per_level: spaces,
        optimize,
        max_empty_lines: if optimize { 1 } else { 2 },
    };

    let formatter = NftablesFormatter::new(config);
    let formatted_output = formatter.format_ruleset(&ruleset);

    // Write output
    if stdout {
        // Output to stdout
        if is_multiple_files {
            println!("=== {} ===", file);
        }
        io::stdout()
            .write_all(formatted_output.as_bytes())
            .with_context(|| "Failed to write to stdout")?;
    } else {
        // Format in place
        fs::write(file, &formatted_output)
            .with_context(|| format!("Failed to write formatted content back to: {}", file))?;
        if is_multiple_files || debug {
            println!("Formatted: {}", file);
        }
    }

    Ok(())
}

fn process_lint_command(
    file: Option<String>,
    json: bool,
    style_warnings: bool,
    best_practices: bool,
    performance_hints: bool,
    security_warnings: bool,
    modules: Option<Vec<String>>,
    debug: bool,
) -> Result<()> {
    let files = match file {
        Some(f) => vec![f],
        None => discover_nftables_files()?,
    };

    let is_multiple_files = files.len() > 1;
    let mut error_file_count = 0;

    for file_path in files {
        if let Err(e) = process_single_file_lint(
            &file_path,
            json,
            style_warnings,
            best_practices,
            performance_hints,
            security_warnings,
            modules.as_ref(),
            debug,
            is_multiple_files,
        ) {
            eprintln!("Error processing {}: {}", file_path, e);
            error_file_count += 1;
            if !is_multiple_files {
                return Err(e);
            }
        }
    }

    if error_file_count > 0 {
        return Err(LintError::DiagnosticErrors {
            file_count: error_file_count,
        }
        .into());
    }

    Ok(())
}

fn process_single_file_lint(
    file: &str,
    json: bool,
    style_warnings: bool,
    best_practices: bool,
    performance_hints: bool,
    security_warnings: bool,
    modules: Option<&Vec<String>>,
    debug: bool,
    is_multiple_files: bool,
) -> Result<()> {
    let path = Path::new(&file);
    if !path.exists() {
        return Err(FormatterError::FileNotFound(file.to_string()).into());
    }

    if !path.is_file() {
        return Err(FormatterError::InvalidFile("Not a regular file".to_string()).into());
    }

    // Read file contents
    let source =
        fs::read_to_string(file).with_context(|| format!("Failed to read file: {}", file))?;

    if debug {
        // Tokenize for debug output
        let mut lexer = NftablesLexer::new(&source);
        let tokens = lexer.tokenize_with_errors();

        eprintln!("=== TOKENS ===");
        for (i, token) in tokens.iter().enumerate() {
            eprintln!(
                "{:3}: {:?} @ {:?} = '{}'",
                i, token.kind, token.range, token.text
            );
        }
        eprintln!();

        // Build and validate CST
        eprintln!("=== CST ===");
        let cst_tree = CstBuilder::build_tree(&tokens);
        match CstBuilder::validate_tree(&cst_tree) {
            Ok(()) => eprintln!("CST validation passed"),
            Err(e) => eprintln!("CST validation error: {}", e),
        }
        eprintln!();
    }

    // Run diagnostics
    let diagnostic_config = DiagnosticConfig {
        enable_style_warnings: style_warnings,
        enable_best_practices: best_practices,
        enable_performance_hints: performance_hints,
        enable_security_warnings: security_warnings,
        max_line_length: 120,
        max_empty_lines: 2,
        preferred_indent: None, // Don't enforce indent style in lint mode
    };

    let analyzer = DiagnosticAnalyzer::new(diagnostic_config);

    let diagnostics = if let Some(modules) = &modules {
        let module_names: Vec<&str> = modules.iter().map(|s| s.as_str()).collect();
        analyzer.analyze_with_modules(&source, file, &module_names)
    } else {
        analyzer.analyze(&source, file)
    };

    if json {
        // Output JSON format for tooling integration
        match diagnostics.to_json() {
            Ok(json) => println!("{}", json),
            Err(e) => {
                // Even JSON serialization errors should be in JSON format when --json is used
                let error_json = format!(r#"{{"error": "JSON serialization failed: {}"}}"#, e);
                println!("{}", error_json);
            }
        }
    } else {
        // Output human-readable format
        if is_multiple_files {
            println!("=== {} ===", file);
        }
        println!("{}", diagnostics.to_human_readable());
    }

    // Return an error if diagnostics reported any errors
    if diagnostics.has_errors() {
        return Err(anyhow::anyhow!("Diagnostics found errors in file"));
    }

    Ok(())
}
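
// Reviewer note (assumption, not in the diff): interpolating `e` directly into
// the JSON template above is not escape-safe; a message containing `"` yields
// invalid JSON. Since serde_json is already a dependency, a hypothetical safer
// form would be:
//
//     let error_json = serde_json::json!({
//         "error": format!("JSON serialization failed: {}", e),
//     });
//     println!("{}", error_json);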

/// Convert parser errors to formatter errors with proper location information
fn convert_parse_error_to_formatter_error(
    error: &crate::parser::ParseError,
    source: &str,
    tokens: &[Token],
) -> FormatterError {
    use crate::parser::ParseError;

    match error {
        ParseError::UnexpectedToken {
            line,
            column,
            expected,
            found,
        } => FormatterError::SyntaxError {
            line: *line,
            column: *column,
            message: format!("Expected {}, found '{}'", expected, found),
            suggestion: None,
        },
        ParseError::MissingToken { expected } => {
            let (line, column) = if let Some(last_token) = tokens.last() {
                position_from_range(&last_token.range, source)
            } else {
                (1, 1)
            };
            FormatterError::SyntaxError {
                line,
                column,
                message: format!("Missing token: expected {}", expected),
                suggestion: None,
            }
        }
        ParseError::InvalidExpression { message } => {
            let (line, column) = find_current_parse_position(tokens, source);
            FormatterError::SyntaxError {
                line,
                column,
                message: format!("Invalid expression: {}", message),
                suggestion: None,
            }
        }
        ParseError::InvalidStatement { message } => {
            let (line, column) = find_current_parse_position(tokens, source);
            FormatterError::SyntaxError {
                line,
                column,
                message: format!("Invalid statement: {}", message),
                suggestion: None,
            }
        }
        ParseError::SemanticError { message } => {
            let (line, column) = find_current_parse_position(tokens, source);
            FormatterError::SyntaxError {
                line,
                column,
                message: format!("Semantic error: {}", message),
                suggestion: None,
            }
        }
        ParseError::LexError(lex_error) => {
            // Convert lexical errors to formatter errors with location
            convert_lex_error_to_formatter_error(lex_error, source)
        }
        ParseError::AnyhowError(anyhow_error) => {
            // For anyhow errors, try to extract location from the error message and context
            let error_msg = anyhow_error.to_string();
            let (line, column) = find_error_location_from_context(&error_msg, tokens, source);
            let suggestion = generate_suggestion_for_error(&error_msg);

            FormatterError::SyntaxError {
                line,
                column,
                message: error_msg,
                suggestion,
            }
        }
    }
}

/// Find the current parsing position from the token stream
fn find_current_parse_position(tokens: &[Token], source: &str) -> (usize, usize) {
    // Look for the last non-whitespace, non-comment token
    for token in tokens.iter().rev() {
        match token.kind {
            TokenKind::Newline | TokenKind::CommentLine(_) => continue,
            _ => return position_from_range(&token.range, source),
        }
    }
    (1, 1) // fallback
}

/// Convert lexical errors to formatter errors
fn convert_lex_error_to_formatter_error(
    lex_error: &crate::lexer::LexError,
    source: &str,
) -> FormatterError {
    use crate::lexer::LexError;

    match lex_error {
        LexError::InvalidToken { position, text } => {
            let (line, column) = offset_to_line_column(*position, source);
            FormatterError::SyntaxError {
                line,
                column,
                message: format!("Invalid token: '{}'", text),
                suggestion: None,
            }
        }
        LexError::UnterminatedString { position } => {
            let (line, column) = offset_to_line_column(*position, source);
            FormatterError::SyntaxError {
                line,
                column,
                message: "Unterminated string literal".to_string(),
                suggestion: Some("Add closing quote".to_string()),
            }
        }
        LexError::InvalidNumber { position, text } => {
            let (line, column) = offset_to_line_column(*position, source);
            FormatterError::SyntaxError {
                line,
                column,
                message: format!("Invalid number: '{}'", text),
                suggestion: Some("Check number format".to_string()),
            }
        }
    }
}

/// Convert a byte offset to a 1-based line/column position
fn offset_to_line_column(offset: usize, source: &str) -> (usize, usize) {
    let mut line = 1;
    let mut column = 1;

    for (i, ch) in source.char_indices() {
        if i >= offset {
            break;
        }
        if ch == '\n' {
            line += 1;
            column = 1;
        } else {
            column += 1;
        }
    }

    (line, column)
}
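
// Quick check (sketch): in the source "table inet f\nchain c", byte offset 13
// is the 'c' of "chain", and the walk above yields line 2, column 1:
//
//     assert_eq!(offset_to_line_column(13, "table inet f\nchain c"), (2, 1));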

/// Find the error location from context clues in the error message
fn find_error_location_from_context(
    error_msg: &str,
    tokens: &[Token],
    source: &str,
) -> (usize, usize) {
    // Look for context clues in the error message
    if error_msg.contains("Expected string or identifier, got:") {
        // Find the problematic token mentioned in the error
        if let Some(bad_token_text) = extract_token_from_error_message(error_msg) {
            // Find this token in the token stream
            for token in tokens {
                if token.text == bad_token_text {
                    return position_from_range(&token.range, source);
                }
            }
        }
    }

    // Fallback to finding the last meaningful token
    find_current_parse_position(tokens, source)
}

/// Extract the problematic token from an error message
fn extract_token_from_error_message(error_msg: &str) -> Option<String> {
    // Parse messages like "Expected string or identifier, got: {"
    error_msg
        .split("got: ")
        .nth(1)
        .map(|got_part| got_part.trim().to_string())
}
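
// Example (sketch): for the parser message
// "Expected string or identifier, got: {" this yields Some("{"):
//
//     assert_eq!(
//         extract_token_from_error_message("Expected string or identifier, got: {"),
//         Some("{".to_string())
//     );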

/// Generate helpful suggestions based on the error message
fn generate_suggestion_for_error(error_msg: &str) -> Option<String> {
    if error_msg.contains("Expected string or identifier") {
        Some(
            "Check if you're missing quotes around a string value or have an unexpected character"
                .to_string(),
        )
    } else if error_msg.contains("Expected") && error_msg.contains("got:") {
        Some("Check syntax and ensure proper nftables structure".to_string())
    } else {
        None
    }
}

/// Convert a TextRange to a 1-based line/column position
fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize) {
    let start_offset: usize = range.start().into();
    let lines: Vec<&str> = source.lines().collect();
    let mut current_offset = 0;

    for (line_idx, line) in lines.iter().enumerate() {
        let line_end = current_offset + line.len();
        if start_offset <= line_end {
            let column = start_offset - current_offset;
            return (line_idx + 1, column + 1); // 1-based indexing
        }
        current_offset = line_end + 1; // +1 for the newline
    }

    (1, 1) // fallback
}
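
// Reviewer note: the `+ 1` assumes Unix "\n" line endings. For "\r\n" sources,
// `str::lines` also strips the '\r', so reported columns drift by one for each
// line after the first; probably acceptable for nftables configs, but worth a
// CRLF test case if Windows-edited files are expected.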

fn process_parse_command(file: String, tree: bool, verbose: bool, debug: bool) -> Result<()> {
    let source =
        fs::read_to_string(&file).with_context(|| format!("Failed to read file: {}", file))?;

    // Tokenize
    let mut lexer = NftablesLexer::new(&source);
    let tokens = lexer
        .tokenize()
        .map_err(|e| FormatterError::ParseError(format!("Tokenization failed: {}", e)))?;

    if debug {
        eprintln!("=== TOKENS ===");
        for (i, token) in tokens.iter().enumerate() {
            eprintln!(
                "{:3}: {:?} @ {:?} = '{}'",
                i, token.kind, token.range, token.text
            );
        }
        eprintln!();
    }

    // Build CST
    let cst_tree = CstBuilder::build_tree(&tokens);

    // Validate CST
    match CstBuilder::validate_tree(&cst_tree) {
        Ok(()) => {
            if debug {
                eprintln!("CST validation passed");
                eprintln!();
            }
        }
        Err(e) => {
            eprintln!("Warning: CST validation error: {}", e);
            eprintln!();
        }
    }

    // Display CST
    let cst_display = CstBuilder::display_tree(&cst_tree, tree, verbose);
    println!("{}", cst_display);

    Ok(())
}

fn main() -> Result<()> {
    let args = Args::parse();

    let result = match &args.command {
        Commands::Format {
            file,
            indent,
            stdout,
            optimize,
            spaces,
            check,
        } => process_format_command(
            file.clone(),
            *indent,
            *stdout,
            *optimize,
            *spaces,
            *check,
            args.debug,
        ),
        Commands::Lint {
            file,
            json,
            style_warnings,
            best_practices,
            performance_hints,
            security_warnings,
            modules,
        } => process_lint_command(
            file.clone(),
            *json,
            *style_warnings,
            *best_practices,
            *performance_hints,
            *security_warnings,
            modules.clone(),
            args.debug,
        ),
        Commands::Parse {
            file,
            tree,
            verbose,
        } => process_parse_command(file.clone(), *tree, *verbose, args.debug),
    };

    if let Err(e) = result {
        // Check if we're in lint mode with JSON output for error formatting
        let use_json = match &args.command {
            Commands::Lint { json, .. } => *json,
            _ => false,
        };

        if use_json {
            // Output the error as JSON when --json is used in lint mode
            let error_json = format!(r#"{{"error": "{}"}}"#, e);
            println!("{}", error_json);
        } else {
            eprintln!("Error: {}", e);

            // Print the error chain
            let mut current = e.source();
            while let Some(cause) = current {
                eprintln!(" Caused by: {}", cause);
                current = cause.source();
            }
        }

        std::process::exit(1);
    }

    Ok(())
}
107
src/parser.rs
@@ -311,9 +311,8 @@ impl Parser {
                self.advance(); // consume 'policy'
                let policy = self.parse_policy()?;
                chain = chain.with_policy(policy);
                self.consume(TokenKind::Semicolon, "Expected ';' after policy")?;
            }
            Some(TokenKind::CommentLine(_)) => {
                self.advance();
@@ -465,7 +464,13 @@ impl Parser {
    fn parse_comparison_expression(&mut self) -> Result<Expression> {
        let mut expr = self.parse_range_expression()?;

        // Check for operators
        while let Some(token) = self.peek() {
            if matches!(token.kind, TokenKind::Newline) {
                self.advance();
                continue;
            }
            // Check for comparison operators
            let operator = match &token.kind {
                TokenKind::Eq => BinaryOperator::Eq,
                TokenKind::Ne => BinaryOperator::Ne,
@@ -473,7 +478,49 @@ impl Parser {
                TokenKind::Le => BinaryOperator::Le,
                TokenKind::Gt => BinaryOperator::Gt,
                TokenKind::Ge => BinaryOperator::Ge,
                _ => {
                    // Check for vmap after an expression
                    if matches!(&token.kind, TokenKind::Vmap) {
                        self.advance(); // consume 'vmap'

                        // Parse the map contents
                        self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;

                        let mut map = Vec::new();

                        while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
                            // Skip commas and newlines
                            if self.current_token_is(&TokenKind::Comma)
                                || self.current_token_is(&TokenKind::Newline)
                            {
                                self.advance();
                                continue;
                            }

                            // Parse key
                            let key = self.parse_expression()?;

                            // Parse colon separator
                            self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;

                            // Parse value
                            let value = self.parse_expression()?;

                            // Add the key-value pair to the map
                            map.push((key, value));
                        }

                        self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;

                        // Return a vmap expression with the previous expression as the mapping target
                        expr = Expression::Vmap {
                            expr: Some(Box::new(expr)),
                            map,
                        };
                        continue; // allow the outer `while` to detect ==, != … afterwards
                    }
                    break;
                }
            };

            self.advance(); // consume operator
@@ -753,6 +800,43 @@ impl Parser {
            let addr = self.advance().unwrap().text.clone();
            Ok(Expression::MacAddress(addr))
        }
        Some(TokenKind::Vmap) => {
            self.advance(); // consume 'vmap'

            // Parse the map contents
            self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;

            let mut map = Vec::new();

            while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
                // Skip commas and newlines
                if self.current_token_is(&TokenKind::Comma)
                    || self.current_token_is(&TokenKind::Newline)
                {
                    self.advance();
                    continue;
                }

                // Parse key
                let key = self.parse_expression()?;

                // Parse colon separator
                self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;

                // Parse value
                let value = self.parse_expression()?;

                // Add the key-value pair to the map
                map.push((key, value));
            }

            self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;

            // No expression available at parse time, will be filled by post-processing if needed
            let expr = None;

            Ok(Expression::Vmap { expr, map })
        }
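
// Observation (sketch, not in the diff): this arm repeats the vmap body
// parsing from parse_comparison_expression above; a shared helper could
// collapse the duplication. Hypothetical shape:
//
//     fn parse_vmap_body(&mut self) -> Result<Vec<(Expression, Expression)>> {
//         self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;
//         let mut map = Vec::new();
//         while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
//             if self.current_token_is(&TokenKind::Comma)
//                 || self.current_token_is(&TokenKind::Newline)
//             {
//                 self.advance();
//                 continue;
//             }
//             let key = self.parse_expression()?;
//             self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;
//             let value = self.parse_expression()?;
//             map.push((key, value));
//         }
//         self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
//         Ok(map)
//     }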
        Some(TokenKind::LeftBrace) => {
            self.advance(); // consume '{'
            let mut elements = Vec::new();
@@ -774,6 +858,23 @@ impl Parser {
            self.consume(TokenKind::RightBrace, "Expected '}' to close set")?;
            Ok(Expression::Set(elements))
        }
        Some(TokenKind::Accept) => {
            self.advance();
            Ok(Expression::Identifier("accept".to_string()))
        }
        Some(TokenKind::Drop) => {
            self.advance();
            Ok(Expression::Identifier("drop".to_string()))
        }
        Some(TokenKind::Reject) => {
            self.advance();
            Ok(Expression::Identifier("reject".to_string()))
        }
        Some(TokenKind::Protocol) => {
            self.advance(); // consume 'protocol'
            let protocol = self.parse_identifier_or_keyword()?;
            Ok(Expression::Protocol(protocol))
        }
        _ => Err(ParseError::InvalidExpression {
            message: format!(
                "Unexpected token in expression: {}",

@@ -175,10 +175,11 @@ impl NftablesFormatter {
        // Add policy on the same line if present
        if let Some(policy) = &chain.policy {
            write!(output, " policy {}", policy).unwrap();
            output.push_str(";\n");
        } else {
            output.push_str("\n");
        }

        if !chain.rules.is_empty() && !self.config.optimize {
            output.push('\n');
        }

@@ -292,6 +293,23 @@ impl NftablesFormatter {
                output.push('-');
                self.format_expression(output, end);
            }
            Expression::Vmap { expr, map } => {
                if let Some(expr) = expr {
                    self.format_expression(output, expr);
                    output.push(' ');
                }
                output.push_str("vmap { ");
                for (i, (key, value)) in map.iter().enumerate() {
                    if i > 0 {
                        output.push_str(", ");
                    }
                    self.format_expression(output, key);
                    output.push_str(" : ");
                    self.format_expression(output, value);
                }
                output.push_str(" }");
            }
        }
    }
}
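
// Example (sketch): for a rule like `tcp dport vmap { 22 : accept, 80 : accept }`,
// the arm above prints the mapped expression, then "vmap { ", the
// comma-separated `key : value` pairs, and a closing " }", matching nft's
// canonical single-line vmap form.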