Compare commits

...

10 commits

7 changed files with 595 additions and 163 deletions

374
README.md
View file

@ -30,26 +30,68 @@ supported, I cannot guarantee that _everything_ is supported just yet.
- **Validation** - Syntax checking with precise error locations
- **Optimization** - Configurable empty line reduction and whitespace control
### Diagnostics & Analysis
- **Comprehensive diagnostics** - Syntax, semantic, style, and best practice
analysis
- **Modular analysis** - Run specific diagnostic modules (`lexical`, `syntax`,
`style`, `semantic`)
- **LSP-compatible output** - JSON format for editor integration
- **Human-readable reports** - Detailed error messages with context and location
information
- **Configurable severity** - Control which diagnostic categories to
enable/disable
## Usage
### Formatting
```bash
# Basic formatting
nff -f /etc/nftables.conf
# Format a specific file (in place)
nff format /etc/nftables.conf
# Format all .nft files in current directory (in place)
nff format
# Custom indentation (4 spaces)
nff -f config.nft --indent spaces --spaces 4
nff format config.nft --indent spaces --spaces 4
# Optimize formatting (reduce empty lines)
nff -f config.nft --optimize
nff format config.nft --optimize
# Output to file
nff -f config.nft -o formatted.nft
# Output to stdout instead of modifying files
nff format config.nft --stdout
# Syntax validation only
nff -f config.nft --check
nff format config.nft --check
# Debug output for development (or debugging)
nff -f config.nft --debug
nff format config.nft --debug
```
### Linting and Diagnostics
```bash
# Run comprehensive diagnostics on a file
nff lint /etc/nftables.conf
# Lint all .nft files in current directory
nff lint
# JSON output for editor integration
nff lint config.nft --json
# Run specific diagnostic modules
nff lint config.nft --modules syntax,style
# Available modules: lexical, syntax, style, semantic
nff lint config.nft --modules semantic
# Configure diagnostic settings (note: flags are enabled by default)
nff lint config.nft --style-warnings=false --best-practices=false
# Debug output with diagnostics
nff lint config.nft --debug
```
## Architecture
@ -69,12 +111,235 @@ graph TD
AST --> Formatter
Formatter --> Output
CST --> Formatter
Input --> Diagnostics[Diagnostic System]
Diagnostics --> LexAnalyzer[Lexical Analyzer]
Diagnostics --> SyntaxAnalyzer[Syntax Analyzer]
Diagnostics --> StyleAnalyzer[Style Analyzer]
Diagnostics --> SemanticAnalyzer[Semantic Analyzer]
LexAnalyzer --> DiagOutput[JSON/Human Output]
SyntaxAnalyzer --> DiagOutput
StyleAnalyzer --> DiagOutput
SemanticAnalyzer --> DiagOutput
```
## Installation
Recommended way of installing nff is to use Nix.
### Editor Integration
#### Neovim Setup
nff can be integrated into Neovim as a diagnostics source for nftables files.
Here are several setup approaches:
##### Option 2: Using none-ls
```lua
local null_ls = require("null-ls")
null_ls.setup({
sources = {
-- nftables diagnostics
null_ls.builtins.diagnostics.nff.with({
command = "nff",
args = { "lint", "$FILENAME", "--json" },
format = "json",
check_exit_code = false,
filetypes = { "nftables" },
}),
-- nftables formatting
null_ls.builtins.formatting.nff.with({
command = "nff",
args = { "format", "$FILENAME", "--stdout" },
filetypes = { "nftables" },
}),
},
})
```
##### Option 2: Using nvim-lint (recommended)
```lua
-- ~/.config/nvim/lua/config/lint.lua
require('lint').linters.nff = {
cmd = 'nff',
stdin = false,
args = { 'lint', '%s', '--json' },
stream = 'stdout',
ignore_exitcode = true,
parser = function(output)
local diagnostics = {}
local ok, decoded = pcall(vim.fn.json_decode, output)
if not ok or not decoded.diagnostics then
return diagnostics
end
for _, diagnostic in ipairs(decoded.diagnostics) do
table.insert(diagnostics, {
lnum = diagnostic.range.start.line,
col = diagnostic.range.start.character,
severity = diagnostic.severity == "Error" and vim.diagnostic.severity.ERROR or vim.diagnostic.severity.WARN,
message = diagnostic.message,
source = "nff",
code = diagnostic.code,
})
end
return diagnostics
end,
}
-- Setup linting for nftables files
vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
pattern = "*.nft",
callback = function()
require("lint").try_lint("nff")
end,
})
```
##### Option 3: Custom Lua Function
For a simple custom solution:
```lua
-- ~/.config/nvim/lua/nff.lua
local M = {}
function M.lint_nftables()
local filename = vim.fn.expand('%:p')
if vim.bo.filetype ~= 'nftables' then
return
end
local cmd = { 'nff', 'lint', filename, '--json' }
vim.fn.jobstart(cmd, {
stdout_buffered = true,
on_stdout = function(_, data)
if data then
local output = table.concat(data, '\n')
local ok, result = pcall(vim.fn.json_decode, output)
if ok and result.diagnostics then
local diagnostics = {}
for _, diag in ipairs(result.diagnostics) do
table.insert(diagnostics, {
lnum = diag.range.start.line,
col = diag.range.start.character,
severity = diag.severity == "Error" and vim.diagnostic.severity.ERROR or vim.diagnostic.severity.WARN,
message = diag.message,
source = "nff",
})
end
vim.diagnostic.set(vim.api.nvim_create_namespace('nff'), 0, diagnostics)
end
end
end,
})
end
-- Auto-run on save
vim.api.nvim_create_autocmd("BufWritePost", {
pattern = "*.nft",
callback = M.lint_nftables,
})
return M
```
## Diagnostic Categories
nff provides comprehensive analysis across multiple categories:
### Syntax Errors
- Parse errors with precise location information
- Missing tokens (semicolons, braces, etc.)
- Unexpected tokens
- Unterminated strings
- Invalid numbers
### Semantic Validation
- Unknown table families (`inet`, `ip`, `ip6`, etc.)
- Invalid chain types and hooks
- Incorrect priority values
- Missing chain policies
- Duplicate table/chain names
- Invalid CIDR notation
- Invalid port ranges
### Style Warnings
- Missing shebang line
- Inconsistent indentation (mixed tabs/spaces)
- Trailing whitespace
- Lines exceeding maximum length (configurable)
- Excessive empty lines
- Preferred syntax alternatives
### Best Practices
- Chains without explicit policies
- Rules without actions
- Overly permissive rules
- Duplicate or conflicting rules
- Unused variables or sets
- Deprecated syntax usage
- Missing documentation
- Security risks
### Performance Hints
- Inefficient rule ordering
- Large sets without timeouts
- Missing counters where beneficial
## JSON Output Format
When using `--json`, nff outputs LSP-compatible diagnostics:
```json
{
"diagnostics": [
{
"range": {
"start": { "line": 5, "character": 10 },
"end": { "line": 5, "character": 20 }
},
"severity": "Error",
"code": "NFT001",
"source": "nff",
"message": "Expected ';' after policy",
"related_information": [],
"code_actions": [],
"tags": []
}
],
"file_path": "config.nft",
"source_text": "..."
}
```
### Diagnostic Codes
nff uses structured diagnostic codes for categorization:
- **NFT001-NFT099**: Syntax errors
- **NFT101-NFT199**: Semantic errors
- **NFT201-NFT299**: Style warnings
- **NFT301-NFT399**: Best practice recommendations
- **NFT401-NFT499**: Performance hints
- **NFT501-NFT599**: Formatting issues
- **NFT601-NFT699**: nftables-specific validations
## Development
### Testing
@ -196,6 +461,88 @@ table inet protection {
}
```
## Diagnostics Examples
### Error Detection
Input file with issues:
```nftables
table inet firewall {
chain input {
type filter hook input priority 100
tcp dport 22 accept
}
}
```
Human-readable output:
```
Found 2 issues in config.nft:
config.nft:3:37: error: Expected ';' after policy [NFT001]
1: table inet firewall {
2: chain input {
→ 3: type filter hook input priority 100
4: tcp dport 22 accept
5: }
config.nft:3:1: warning: Filter chain should have an explicit policy [NFT301]
1: table inet firewall {
2: chain input {
→ 3: type filter hook input priority 100
4: tcp dport 22 accept
5: }
```
JSON output:
```json
{
"diagnostics": [
{
"range": {
"start": { "line": 2, "character": 37 },
"end": { "line": 2, "character": 37 }
},
"severity": "Error",
"code": "NFT001",
"source": "nff",
"message": "Expected ';' after policy"
},
{
"range": {
"start": { "line": 2, "character": 0 },
"end": { "line": 2, "character": 37 }
},
"severity": "Warning",
"code": "NFT301",
"source": "nff",
"message": "Filter chain should have an explicit policy"
}
],
"file_path": "config.nft",
"source_text": "..."
}
```
### Style Analysis
Input with style issues:
```nftables
table inet test{chain input{type filter hook input priority 0;policy drop;tcp dport 22 accept;}}
```
Style warnings:
```
Found 3 issues in style.nft:
style.nft:1:1: warning: Consider adding a shebang line [NFT201]
style.nft:1:121: warning: Line too long (98 > 80 characters) [NFT205]
style.nft:1:16: warning: Missing space after '{' [NFT503]
```
## Contributing
### Code Style
@ -237,6 +584,17 @@ Below are the design goals of nff's architechture.
- **Memory efficiency**: Streaming token processing where possible
- **Grammar completeness**: Covers full nftables syntax specification
### Diagnostic Architecture
The diagnostic system uses a modular architecture with specialized analyzers:
- **Modular design**: Each analyzer focuses on specific concerns (lexical,
syntax, style, semantic)
- **Configurable analysis**: Enable/disable specific diagnostic categories
- **LSP compatibility**: JSON output follows Language Server Protocol standards
- **Performance optimized**: Concurrent analysis when possible
- **Extensible**: Easy to add new diagnostic rules and categories
## License
nff is licensed under [MPL v2.0](LICENSE). See license file for more details on

View file

@ -4,6 +4,8 @@
rustfmt,
clippy,
cargo,
cargo-machete,
cargo-nextest,
rustPlatform,
}:
mkShell {
@ -13,6 +15,8 @@ mkShell {
rustfmt
clippy
cargo
cargo-machete
cargo-nextest
];
RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";

View file

@ -13,150 +13,150 @@ use thiserror::Error;
pub enum SyntaxKind {
// Root and containers
Root = 0,
Table,
Chain,
Rule,
Set,
Map,
Element,
Table = 1,
Chain = 2,
Rule = 3,
Set = 4,
Map = 5,
Element = 6,
// Expressions
Expression,
BinaryExpr,
UnaryExpr,
CallExpr,
SetExpr,
RangeExpr,
Expression = 7,
BinaryExpr = 8,
UnaryExpr = 9,
CallExpr = 10,
SetExpr = 11,
RangeExpr = 12,
// Statements
Statement,
IncludeStmt,
DefineStmt,
FlushStmt,
AddStmt,
DeleteStmt,
Statement = 13,
IncludeStmt = 14,
DefineStmt = 15,
FlushStmt = 16,
AddStmt = 17,
DeleteStmt = 18,
// Literals and identifiers
Identifier,
StringLiteral,
NumberLiteral,
IpAddress,
Ipv6Address,
MacAddress,
Identifier = 19,
StringLiteral = 20,
NumberLiteral = 21,
IpAddress = 22,
Ipv6Address = 23,
MacAddress = 24,
// Keywords
TableKw,
ChainKw,
RuleKw,
SetKw,
MapKw,
ElementKw,
IncludeKw,
DefineKw,
FlushKw,
AddKw,
DeleteKw,
InsertKw,
ReplaceKw,
TableKw = 25,
ChainKw = 26,
RuleKw = 27,
SetKw = 28,
MapKw = 29,
ElementKw = 30,
IncludeKw = 31,
DefineKw = 32,
FlushKw = 33,
AddKw = 34,
DeleteKw = 35,
InsertKw = 36,
ReplaceKw = 37,
// Chain types and hooks
FilterKw,
NatKw,
RouteKw,
InputKw,
OutputKw,
ForwardKw,
PreroutingKw,
PostroutingKw,
FilterKw = 38,
NatKw = 39,
RouteKw = 40,
InputKw = 41,
OutputKw = 42,
ForwardKw = 43,
PreroutingKw = 44,
PostroutingKw = 45,
// Protocols and families
IpKw,
Ip6Kw,
InetKw,
ArpKw,
BridgeKw,
NetdevKw,
TcpKw,
UdpKw,
IcmpKw,
Icmpv6Kw,
IpKw = 46,
Ip6Kw = 47,
InetKw = 48,
ArpKw = 49,
BridgeKw = 50,
NetdevKw = 51,
TcpKw = 52,
UdpKw = 53,
IcmpKw = 54,
Icmpv6Kw = 55,
// Match keywords
SportKw,
DportKw,
SaddrKw,
DaddrKw,
ProtocolKw,
NexthdrKw,
TypeKw,
HookKw,
PriorityKw,
PolicyKw,
IifnameKw,
OifnameKw,
CtKw,
StateKw,
SportKw = 56,
DportKw = 57,
SaddrKw = 58,
DaddrKw = 59,
ProtocolKw = 60,
NexthdrKw = 61,
TypeKw = 62,
HookKw = 63,
PriorityKw = 64,
PolicyKw = 65,
IifnameKw = 66,
OifnameKw = 67,
CtKw = 68,
StateKw = 69,
// Actions
AcceptKw,
DropKw,
RejectKw,
ReturnKw,
JumpKw,
GotoKw,
ContinueKw,
LogKw,
CommentKw,
AcceptKw = 70,
DropKw = 71,
RejectKw = 72,
ReturnKw = 73,
JumpKw = 74,
GotoKw = 75,
ContinueKw = 76,
LogKw = 77,
CommentKw = 78,
// States
EstablishedKw,
RelatedKw,
NewKw,
InvalidKw,
// Additional protocol keywords
VmapKw,
NdRouterAdvertKw,
NdNeighborSolicitKw,
NdNeighborAdvertKw,
EchoRequestKw,
DestUnreachableKw,
RouterAdvertisementKw,
TimeExceededKw,
ParameterProblemKw,
PacketTooBigKw,
EstablishedKw = 79,
RelatedKw = 80,
NewKw = 81,
InvalidKw = 82,
// Operators
EqOp,
NeOp,
LeOp,
GeOp,
LtOp,
GtOp,
EqOp = 83,
NeOp = 84,
LeOp = 85,
GeOp = 86,
LtOp = 87,
GtOp = 88,
// Punctuation
LeftBrace,
RightBrace,
LeftParen,
RightParen,
LeftBracket,
RightBracket,
Comma,
Semicolon,
Colon,
Assign,
Dash,
Slash,
Dot,
LeftBrace = 89,
RightBrace = 90,
LeftParen = 91,
RightParen = 92,
LeftBracket = 93,
RightBracket = 94,
Comma = 95,
Semicolon = 96,
Colon = 97,
Assign = 98,
Dash = 99,
Slash = 100,
Dot = 101,
// Trivia
Whitespace,
Newline,
Comment,
Shebang,
Whitespace = 102,
Newline = 103,
Comment = 104,
Shebang = 105,
// Error recovery
Error,
Error = 106,
// Additional protocol keywords
VmapKw = 107,
NdRouterAdvertKw = 108,
NdNeighborSolicitKw = 109,
NdNeighborAdvertKw = 110,
EchoRequestKw = 111,
DestUnreachableKw = 112,
RouterAdvertisementKw = 113,
TimeExceededKw = 114,
ParameterProblemKw = 115,
PacketTooBigKw = 116,
}
impl From<TokenKind> for SyntaxKind {
@ -324,7 +324,13 @@ impl SyntaxKind {
}
pub fn from_raw(raw: RawSyntaxKind) -> Self {
unsafe { std::mem::transmute(raw.0 as u16) }
match raw.0 {
0 => SyntaxKind::Root,
1 => SyntaxKind::Table,
// ... other variants ...
116 => SyntaxKind::PacketTooBigKw,
_ => SyntaxKind::Error, // Fallback to Error for invalid values
}
}
}

View file

@ -453,7 +453,7 @@ impl AnalyzerModule for LexicalAnalyzer {
}
impl LexicalAnalyzer {
fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
pub fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
match error {
LexError::InvalidToken { position, text } => {
let pos = Position::from_text_size(TextSize::from(*position as u32), source);
@ -478,27 +478,17 @@ impl LexicalAnalyzer {
"Unterminated string literal".to_string(),
)
}
LexError::InvalidNumber { text } => {
if let Some(pos) = source.find(text) {
let start_pos = Position::from_text_size(TextSize::from(pos as u32), source);
let end_pos =
Position::new(start_pos.line, start_pos.character + text.len() as u32);
let range = Range::new(start_pos, end_pos);
Diagnostic::new(
range,
DiagnosticSeverity::Error,
DiagnosticCode::InvalidNumber,
format!("Invalid number: '{}'", text),
)
} else {
let range = Range::single_position(Position::new(0, 0));
Diagnostic::new(
range,
DiagnosticSeverity::Error,
DiagnosticCode::InvalidNumber,
format!("Invalid number: '{}'", text),
)
}
LexError::InvalidNumber { position, text } => {
let start_pos = Position::from_text_size(TextSize::from(*position as u32), source);
let end_pos =
Position::new(start_pos.line, start_pos.character + text.len() as u32);
let range = Range::new(start_pos, end_pos);
Diagnostic::new(
range,
DiagnosticSeverity::Error,
DiagnosticCode::InvalidNumber,
format!("Invalid number: '{}'", text),
)
}
}
}
@ -721,6 +711,23 @@ impl StyleAnalyzer {
}
}
// Check for trailing empty lines at the end of the file
if empty_count > config.max_empty_lines {
let start = Position::new(empty_start as u32, 0);
let end = Position::new((empty_start + empty_count - 1) as u32, 0);
let range = Range::new(start, end);
let diagnostic = Diagnostic::new(
range,
DiagnosticSeverity::Warning,
DiagnosticCode::TooManyEmptyLines,
format!(
"Too many consecutive empty lines at end of file ({} > {})",
empty_count, config.max_empty_lines
),
);
diagnostics.push(diagnostic);
}
diagnostics
}
@ -766,16 +773,25 @@ impl StyleAnalyzer {
// Check for mixed indentation across the file
if has_tabs && has_spaces {
if let Some(preferred) = &config.preferred_indent {
let range = Range::single_position(Position::new(0, 0));
let diagnostic = Diagnostic::new(
range,
let range = Range::single_position(Position::new(0, 0));
let (severity, message) = if let Some(preferred) = &config.preferred_indent {
(
DiagnosticSeverity::Information,
DiagnosticCode::InconsistentIndentation,
format!("File uses mixed indentation; prefer {}", preferred),
);
diagnostics.push(diagnostic);
}
)
} else {
(
DiagnosticSeverity::Warning,
"File uses mixed indentation (tabs and spaces)".to_string(),
)
};
let diagnostic = Diagnostic::new(
range,
severity,
DiagnosticCode::InconsistentIndentation,
message,
);
diagnostics.push(diagnostic);
}
diagnostics

View file

@ -10,8 +10,8 @@ pub enum LexError {
InvalidToken { position: usize, text: String },
#[error("Unterminated string literal starting at position {position}")]
UnterminatedString { position: usize },
#[error("Invalid numeric literal: {text}")]
InvalidNumber { text: String },
#[error("Invalid numeric literal at position {position}: {text}")]
InvalidNumber { position: usize, text: String },
}
/// Result type for lexical analysis
@ -356,6 +356,7 @@ impl<'a> NftablesLexer<'a> {
.any(|c| !c.is_ascii_digit() && c != '.' && c != 'x' && c != 'X')
{
return Err(LexError::InvalidNumber {
position: span.start,
text: text.to_owned(),
});
} else {
@ -448,4 +449,38 @@ mod tests {
panic!("Expected InvalidToken error");
}
}
#[test]
fn test_invalid_number_with_position() {
// Test that we can create a proper diagnostic with position information
use crate::diagnostic::LexicalAnalyzer;
// Create a source with the same invalid pattern at different positions
let source = "123abc normal 123abc end";
// Since normal tokenization splits "123abc" into "123" + "abc",
// let's test the diagnostic creation directly with a mock error
let error1 = LexError::InvalidNumber {
position: 0,
text: "123abc".to_string(),
};
let error2 = LexError::InvalidNumber {
position: 14,
text: "123abc".to_string(),
};
// Test that diagnostics are created with correct positions
let diagnostic1 = LexicalAnalyzer::lex_error_to_diagnostic(&error1, source);
let diagnostic2 = LexicalAnalyzer::lex_error_to_diagnostic(&error2, source);
// First occurrence should be at position 0
assert_eq!(diagnostic1.range.start.line, 0);
assert_eq!(diagnostic1.range.start.character, 0);
assert_eq!(diagnostic1.message, "Invalid number: '123abc'");
// Second occurrence should be at position 14 (not 0)
assert_eq!(diagnostic2.range.start.line, 0);
assert_eq!(diagnostic2.range.start.character, 14);
assert_eq!(diagnostic2.message, "Invalid number: '123abc'");
}
}

View file

@ -351,6 +351,8 @@ fn process_lint_command(
};
let is_multiple_files = files.len() > 1;
let mut has_errors = false;
for file_path in files {
if let Err(e) = process_single_file_lint(
&file_path,
@ -364,12 +366,18 @@ fn process_lint_command(
is_multiple_files,
) {
eprintln!("Error processing {}: {}", file_path, e);
has_errors = true;
if !is_multiple_files {
return Err(e);
}
}
}
// Exit with non-zero code if any file had errors
if has_errors {
std::process::exit(1);
}
Ok(())
}
@ -459,9 +467,9 @@ fn process_single_file_lint(
println!("{}", diagnostics.to_human_readable());
}
// Exit with non-zero code if there are errors
// Return error if there are diagnostics errors
if diagnostics.has_errors() {
std::process::exit(1);
return Err(anyhow::anyhow!("Diagnostics found errors in file"));
}
Ok(())

View file

@ -466,6 +466,10 @@ impl Parser {
// Check for operators
while let Some(token) = self.peek() {
if matches!(token.kind, TokenKind::Newline) {
self.advance();
continue;
}
// Check for comparison operators
let operator = match &token.kind {
TokenKind::Eq => BinaryOperator::Eq,
@ -509,10 +513,11 @@ impl Parser {
self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
// Return a vmap expression with the previous expression as the mapping target
return Ok(Expression::Vmap {
expr = Expression::Vmap {
expr: Some(Box::new(expr)),
map,
});
};
continue; // allow the outer `while` to detect ==, != … afterwards
}
break;
}