Compare commits
No commits in common. "92735f3eeef9061b03504598f23d6c1347a8a4f0" and "e0f93d03072e15345c9eb068e90d0b1dad8e54be" have entirely different histories.
92735f3eee
...
e0f93d0307
7 changed files with 169 additions and 601 deletions
374 README.md
@ -30,68 +30,26 @@ supported, I cannot guarantee that _everything_ is supported just yet.
- **Validation** - Syntax checking with precise error locations
- **Optimization** - Configurable empty line reduction and whitespace control

### Diagnostics & Analysis

- **Comprehensive diagnostics** - Syntax, semantic, style, and best practice
  analysis
- **Modular analysis** - Run specific diagnostic modules (`lexical`, `syntax`,
  `style`, `semantic`)
- **LSP-compatible output** - JSON format for editor integration
- **Human-readable reports** - Detailed error messages with context and location
  information
- **Configurable severity** - Control which diagnostic categories to
  enable/disable

## Usage

### Formatting

```bash
# Format a specific file (in place)
nff format /etc/nftables.conf

# Format all .nft files in the current directory (in place)
nff format

# Basic formatting
nff -f /etc/nftables.conf

# Custom indentation (4 spaces)
nff format config.nft --indent spaces --spaces 4
nff -f config.nft --indent spaces --spaces 4

# Optimize formatting (reduce empty lines)
nff format config.nft --optimize
nff -f config.nft --optimize

# Output to stdout instead of modifying files
nff format config.nft --stdout

# Output to file
nff -f config.nft -o formatted.nft

# Syntax validation only
nff format config.nft --check
nff -f config.nft --check

# Debug output for development and debugging
nff format config.nft --debug
```

### Linting and Diagnostics

```bash
# Run comprehensive diagnostics on a file
nff lint /etc/nftables.conf

# Lint all .nft files in the current directory
nff lint

# JSON output for editor integration
nff lint config.nft --json

# Run specific diagnostic modules
nff lint config.nft --modules syntax,style

# Available modules: lexical, syntax, style, semantic
nff lint config.nft --modules semantic

# Configure diagnostic settings (note: these flags are enabled by default)
nff lint config.nft --style-warnings=false --best-practices=false

# Debug output with diagnostics
nff lint config.nft --debug
nff -f config.nft --debug
```

## Architecture
@ -111,235 +69,12 @@ graph TD
    AST --> Formatter
    Formatter --> Output
    CST --> Formatter

    Input --> Diagnostics[Diagnostic System]
    Diagnostics --> LexAnalyzer[Lexical Analyzer]
    Diagnostics --> SyntaxAnalyzer[Syntax Analyzer]
    Diagnostics --> StyleAnalyzer[Style Analyzer]
    Diagnostics --> SemanticAnalyzer[Semantic Analyzer]

    LexAnalyzer --> DiagOutput[JSON/Human Output]
    SyntaxAnalyzer --> DiagOutput
    StyleAnalyzer --> DiagOutput
    SemanticAnalyzer --> DiagOutput
```

## Installation

The recommended way to install nff is with Nix.
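A minimal sketch (the flake URL is hypothetical here; substitute the real repository):

```bash
# Run nff once without installing it (hypothetical flake URL)
nix run github:OWNER/nff -- format /etc/nftables.conf --check

# Or install it into your profile
nix profile install github:OWNER/nff
```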

### Editor Integration

#### Neovim Setup

nff can be integrated into Neovim as a diagnostics source for nftables files.
Here are several setup approaches:
##### Option 1: Using none-ls

```lua
local null_ls = require("null-ls")

null_ls.setup({
  sources = {
    -- nftables diagnostics
    null_ls.builtins.diagnostics.nff.with({
      command = "nff",
      args = { "lint", "$FILENAME", "--json" },
      format = "json",
      check_exit_code = false,
      filetypes = { "nftables" },
    }),

    -- nftables formatting
    null_ls.builtins.formatting.nff.with({
      command = "nff",
      args = { "format", "$FILENAME", "--stdout" },
      filetypes = { "nftables" },
    }),
  },
})
```

##### Option 2: Using nvim-lint (recommended)

```lua
-- ~/.config/nvim/lua/config/lint.lua
require('lint').linters.nff = {
  cmd = 'nff',
  stdin = false,
  args = { 'lint', '%s', '--json' },
  stream = 'stdout',
  ignore_exitcode = true,
  parser = function(output)
    local diagnostics = {}
    local ok, decoded = pcall(vim.fn.json_decode, output)

    if not ok or not decoded.diagnostics then
      return diagnostics
    end

    for _, diagnostic in ipairs(decoded.diagnostics) do
      table.insert(diagnostics, {
        lnum = diagnostic.range.start.line,
        col = diagnostic.range.start.character,
        severity = diagnostic.severity == "Error" and vim.diagnostic.severity.ERROR
          or vim.diagnostic.severity.WARN,
        message = diagnostic.message,
        source = "nff",
        code = diagnostic.code,
      })
    end

    return diagnostics
  end,
}

-- Set up linting for nftables files
vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
  pattern = "*.nft",
  callback = function()
    require("lint").try_lint("nff")
  end,
})
```

##### Option 3: Custom Lua Function

For a simple custom solution:

```lua
-- ~/.config/nvim/lua/nff.lua
local M = {}

function M.lint_nftables()
  local filename = vim.fn.expand('%:p')
  if vim.bo.filetype ~= 'nftables' then
    return
  end

  local cmd = { 'nff', 'lint', filename, '--json' }

  vim.fn.jobstart(cmd, {
    stdout_buffered = true,
    on_stdout = function(_, data)
      if data then
        local output = table.concat(data, '\n')
        local ok, result = pcall(vim.fn.json_decode, output)

        if ok and result.diagnostics then
          local diagnostics = {}
          for _, diag in ipairs(result.diagnostics) do
            table.insert(diagnostics, {
              lnum = diag.range.start.line,
              col = diag.range.start.character,
              severity = diag.severity == "Error" and vim.diagnostic.severity.ERROR
                or vim.diagnostic.severity.WARN,
              message = diag.message,
              source = "nff",
            })
          end

          vim.diagnostic.set(vim.api.nvim_create_namespace('nff'), 0, diagnostics)
        end
      end
    end,
  })
end

-- Auto-run on save
vim.api.nvim_create_autocmd("BufWritePost", {
  pattern = "*.nft",
  callback = M.lint_nftables,
})

return M
```

## Diagnostic Categories

nff provides comprehensive analysis across multiple categories:

### Syntax Errors

- Parse errors with precise location information
- Missing tokens (semicolons, braces, etc.)
- Unexpected tokens
- Unterminated strings
- Invalid numbers

### Semantic Validation

- Unknown table families (valid families include `inet`, `ip`, `ip6`, etc.)
- Invalid chain types and hooks
- Incorrect priority values
- Missing chain policies
- Duplicate table/chain names
- Invalid CIDR notation
- Invalid port ranges

### Style Warnings

- Missing shebang line
- Inconsistent indentation (mixed tabs/spaces)
- Trailing whitespace
- Lines exceeding maximum length (configurable)
- Excessive empty lines
- Preferred syntax alternatives

### Best Practices

- Chains without explicit policies
- Rules without actions
- Overly permissive rules
- Duplicate or conflicting rules
- Unused variables or sets
- Deprecated syntax usage
- Missing documentation
- Security risks

### Performance Hints

- Inefficient rule ordering
- Large sets without timeouts
- Missing counters where beneficial

## JSON Output Format

When using `--json`, nff outputs LSP-compatible diagnostics:

```json
{
  "diagnostics": [
    {
      "range": {
        "start": { "line": 5, "character": 10 },
        "end": { "line": 5, "character": 20 }
      },
      "severity": "Error",
      "code": "NFT001",
      "source": "nff",
      "message": "Expected ';' after policy",
      "related_information": [],
      "code_actions": [],
      "tags": []
    }
  ],
  "file_path": "config.nft",
  "source_text": "..."
}
```
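
Because the output is plain JSON, it composes with standard tooling; a minimal sketch, assuming `jq` is available:

```bash
# List every diagnostic as "CODE: message"
nff lint config.nft --json | jq -r '.diagnostics[] | "\(.code): \(.message)"'

# Count diagnostics by severity
nff lint config.nft --json | jq '[.diagnostics[].severity] | group_by(.) | map({(.[0]): length}) | add'
```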

### Diagnostic Codes

nff uses structured diagnostic codes for categorization (see the example after this list):

- **NFT001-NFT099**: Syntax errors
- **NFT101-NFT199**: Semantic errors
- **NFT201-NFT299**: Style warnings
- **NFT301-NFT399**: Best practice recommendations
- **NFT401-NFT499**: Performance hints
- **NFT501-NFT599**: Formatting issues
- **NFT601-NFT699**: nftables-specific validations
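
These ranges pair with the lint flags shown under Usage; the sketch below assumes the flag names map onto the categories as documented:

```bash
# Suppress style warnings (NFT2xx) and best-practice hints (NFT3xx)
nff lint config.nft --style-warnings=false --best-practices=false

# Or filter the NFT2xx range out of the JSON output with jq
nff lint config.nft --json | jq '.diagnostics |= map(select(.code | startswith("NFT2") | not))'
```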

## Development

### Testing

@ -461,88 +196,6 @@ table inet protection {

}
```

## Diagnostics Examples

### Error Detection

Input file with issues:

```nftables
table inet firewall {
    chain input {
        type filter hook input priority 100
        tcp dport 22 accept
    }
}
```

Human-readable output:

```
Found 2 issues in config.nft:

config.nft:3:37: error: Expected ';' after policy [NFT001]
  1: table inet firewall {
  2:     chain input {
→ 3:         type filter hook input priority 100
  4:         tcp dport 22 accept
  5:     }

config.nft:3:1: warning: Filter chain should have an explicit policy [NFT301]
  1: table inet firewall {
  2:     chain input {
→ 3:         type filter hook input priority 100
  4:         tcp dport 22 accept
  5:     }
```

JSON output:

```json
{
  "diagnostics": [
    {
      "range": {
        "start": { "line": 2, "character": 37 },
        "end": { "line": 2, "character": 37 }
      },
      "severity": "Error",
      "code": "NFT001",
      "source": "nff",
      "message": "Expected ';' after policy"
    },
    {
      "range": {
        "start": { "line": 2, "character": 0 },
        "end": { "line": 2, "character": 37 }
      },
      "severity": "Warning",
      "code": "NFT301",
      "source": "nff",
      "message": "Filter chain should have an explicit policy"
    }
  ],
  "file_path": "config.nft",
  "source_text": "..."
}
```

### Style Analysis

Input with style issues:

```nftables
table inet test{chain input{type filter hook input priority 0;policy drop;tcp dport 22 accept;}}
```

Style warnings:

```
Found 3 issues in style.nft:

style.nft:1:1: warning: Consider adding a shebang line [NFT201]
style.nft:1:121: warning: Line too long (98 > 80 characters) [NFT205]
style.nft:1:16: warning: Missing space after '{' [NFT503]
```

## Contributing

### Code Style
@ -584,17 +237,6 @@ Below are the design goals of nff's architecture.

- **Memory efficiency**: Streaming token processing where possible
- **Grammar completeness**: Covers the full nftables syntax specification

### Diagnostic Architecture

The diagnostic system uses a modular architecture with specialized analyzers:

- **Modular design**: Each analyzer focuses on specific concerns (lexical,
  syntax, style, semantic)
- **Configurable analysis**: Enable/disable specific diagnostic categories
- **LSP compatibility**: JSON output follows Language Server Protocol standards
- **Performance optimized**: Concurrent analysis when possible
- **Extensible**: Easy to add new diagnostic rules and categories

## License

nff is licensed under [MPL v2.0](LICENSE). See the license file for more details.
@ -4,8 +4,6 @@

  rustfmt,
  clippy,
  cargo,
  cargo-machete,
  cargo-nextest,
  rustPlatform,
}:
mkShell {
@ -15,8 +13,6 @@ mkShell {

    rustfmt
    clippy
    cargo
    cargo-machete
    cargo-nextest
  ];

  RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";
256 src/cst.rs
@ -13,150 +13,150 @@ use thiserror::Error;

pub enum SyntaxKind {
    // Root and containers
    Root = 0,
    Table = 1,
    Chain = 2,
    Rule = 3,
    Set = 4,
    Map = 5,
    Element = 6,
    Table,
    Chain,
    Rule,
    Set,
    Map,
    Element,

    // Expressions
    Expression = 7,
    BinaryExpr = 8,
    UnaryExpr = 9,
    CallExpr = 10,
    SetExpr = 11,
    RangeExpr = 12,
    Expression,
    BinaryExpr,
    UnaryExpr,
    CallExpr,
    SetExpr,
    RangeExpr,

    // Statements
    Statement = 13,
    IncludeStmt = 14,
    DefineStmt = 15,
    FlushStmt = 16,
    AddStmt = 17,
    DeleteStmt = 18,
    Statement,
    IncludeStmt,
    DefineStmt,
    FlushStmt,
    AddStmt,
    DeleteStmt,

    // Literals and identifiers
    Identifier = 19,
    StringLiteral = 20,
    NumberLiteral = 21,
    IpAddress = 22,
    Ipv6Address = 23,
    MacAddress = 24,
    Identifier,
    StringLiteral,
    NumberLiteral,
    IpAddress,
    Ipv6Address,
    MacAddress,

    // Keywords
    TableKw = 25,
    ChainKw = 26,
    RuleKw = 27,
    SetKw = 28,
    MapKw = 29,
    ElementKw = 30,
    IncludeKw = 31,
    DefineKw = 32,
    FlushKw = 33,
    AddKw = 34,
    DeleteKw = 35,
    InsertKw = 36,
    ReplaceKw = 37,
    TableKw,
    ChainKw,
    RuleKw,
    SetKw,
    MapKw,
    ElementKw,
    IncludeKw,
    DefineKw,
    FlushKw,
    AddKw,
    DeleteKw,
    InsertKw,
    ReplaceKw,

    // Chain types and hooks
    FilterKw = 38,
    NatKw = 39,
    RouteKw = 40,
    InputKw = 41,
    OutputKw = 42,
    ForwardKw = 43,
    PreroutingKw = 44,
    PostroutingKw = 45,
    FilterKw,
    NatKw,
    RouteKw,
    InputKw,
    OutputKw,
    ForwardKw,
    PreroutingKw,
    PostroutingKw,

    // Protocols and families
    IpKw = 46,
    Ip6Kw = 47,
    InetKw = 48,
    ArpKw = 49,
    BridgeKw = 50,
    NetdevKw = 51,
    TcpKw = 52,
    UdpKw = 53,
    IcmpKw = 54,
    Icmpv6Kw = 55,
    IpKw,
    Ip6Kw,
    InetKw,
    ArpKw,
    BridgeKw,
    NetdevKw,
    TcpKw,
    UdpKw,
    IcmpKw,
    Icmpv6Kw,

    // Match keywords
    SportKw = 56,
    DportKw = 57,
    SaddrKw = 58,
    DaddrKw = 59,
    ProtocolKw = 60,
    NexthdrKw = 61,
    TypeKw = 62,
    HookKw = 63,
    PriorityKw = 64,
    PolicyKw = 65,
    IifnameKw = 66,
    OifnameKw = 67,
    CtKw = 68,
    StateKw = 69,
    SportKw,
    DportKw,
    SaddrKw,
    DaddrKw,
    ProtocolKw,
    NexthdrKw,
    TypeKw,
    HookKw,
    PriorityKw,
    PolicyKw,
    IifnameKw,
    OifnameKw,
    CtKw,
    StateKw,

    // Actions
    AcceptKw = 70,
    DropKw = 71,
    RejectKw = 72,
    ReturnKw = 73,
    JumpKw = 74,
    GotoKw = 75,
    ContinueKw = 76,
    LogKw = 77,
    CommentKw = 78,
    AcceptKw,
    DropKw,
    RejectKw,
    ReturnKw,
    JumpKw,
    GotoKw,
    ContinueKw,
    LogKw,
    CommentKw,

    // States
    EstablishedKw = 79,
    RelatedKw = 80,
    NewKw = 81,
    InvalidKw = 82,

    // Operators
    EqOp = 83,
    NeOp = 84,
    LeOp = 85,
    GeOp = 86,
    LtOp = 87,
    GtOp = 88,

    // Punctuation
    LeftBrace = 89,
    RightBrace = 90,
    LeftParen = 91,
    RightParen = 92,
    LeftBracket = 93,
    RightBracket = 94,
    Comma = 95,
    Semicolon = 96,
    Colon = 97,
    Assign = 98,
    Dash = 99,
    Slash = 100,
    Dot = 101,

    // Trivia
    Whitespace = 102,
    Newline = 103,
    Comment = 104,
    Shebang = 105,

    // Error recovery
    Error = 106,
    EstablishedKw,
    RelatedKw,
    NewKw,
    InvalidKw,

    // Additional protocol keywords
    VmapKw = 107,
    NdRouterAdvertKw = 108,
    NdNeighborSolicitKw = 109,
    NdNeighborAdvertKw = 110,
    EchoRequestKw = 111,
    DestUnreachableKw = 112,
    RouterAdvertisementKw = 113,
    TimeExceededKw = 114,
    ParameterProblemKw = 115,
    PacketTooBigKw = 116,
    VmapKw,
    NdRouterAdvertKw,
    NdNeighborSolicitKw,
    NdNeighborAdvertKw,
    EchoRequestKw,
    DestUnreachableKw,
    RouterAdvertisementKw,
    TimeExceededKw,
    ParameterProblemKw,
    PacketTooBigKw,

    // Operators
    EqOp,
    NeOp,
    LeOp,
    GeOp,
    LtOp,
    GtOp,

    // Punctuation
    LeftBrace,
    RightBrace,
    LeftParen,
    RightParen,
    LeftBracket,
    RightBracket,
    Comma,
    Semicolon,
    Colon,
    Assign,
    Dash,
    Slash,
    Dot,

    // Trivia
    Whitespace,
    Newline,
    Comment,
    Shebang,

    // Error recovery
    Error,
}

impl From<TokenKind> for SyntaxKind {
@ -324,13 +324,7 @@ impl SyntaxKind {

    }

    pub fn from_raw(raw: RawSyntaxKind) -> Self {
        match raw.0 {
            0 => SyntaxKind::Root,
            1 => SyntaxKind::Table,
            // ... other variants ...
            116 => SyntaxKind::PacketTooBigKw,
            _ => SyntaxKind::Error, // Fallback to Error for invalid values
        }
        unsafe { std::mem::transmute(raw.0 as u16) }
    }
}

@ -453,7 +453,7 @@ impl AnalyzerModule for LexicalAnalyzer {

}

impl LexicalAnalyzer {
    pub fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
    fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
        match error {
            LexError::InvalidToken { position, text } => {
                let pos = Position::from_text_size(TextSize::from(*position as u32), source);

@ -478,8 +478,9 @@ impl LexicalAnalyzer {

                    "Unterminated string literal".to_string(),
                )
            }
            LexError::InvalidNumber { position, text } => {
                let start_pos = Position::from_text_size(TextSize::from(*position as u32), source);
            LexError::InvalidNumber { text } => {
                if let Some(pos) = source.find(text) {
                    let start_pos = Position::from_text_size(TextSize::from(pos as u32), source);
                    let end_pos =
                        Position::new(start_pos.line, start_pos.character + text.len() as u32);
                    let range = Range::new(start_pos, end_pos);

@ -489,6 +490,15 @@ impl LexicalAnalyzer {

                        DiagnosticCode::InvalidNumber,
                        format!("Invalid number: '{}'", text),
                    )
                } else {
                    let range = Range::single_position(Position::new(0, 0));
                    Diagnostic::new(
                        range,
                        DiagnosticSeverity::Error,
                        DiagnosticCode::InvalidNumber,
                        format!("Invalid number: '{}'", text),
                    )
                }
            }
        }
    }

@ -711,23 +721,6 @@ impl StyleAnalyzer {

            }
        }

        // Check for trailing empty lines at the end of the file
        if empty_count > config.max_empty_lines {
            let start = Position::new(empty_start as u32, 0);
            let end = Position::new((empty_start + empty_count - 1) as u32, 0);
            let range = Range::new(start, end);
            let diagnostic = Diagnostic::new(
                range,
                DiagnosticSeverity::Warning,
                DiagnosticCode::TooManyEmptyLines,
                format!(
                    "Too many consecutive empty lines at end of file ({} > {})",
                    empty_count, config.max_empty_lines
                ),
            );
            diagnostics.push(diagnostic);
        }

        diagnostics
    }

@ -773,26 +766,17 @@ impl StyleAnalyzer {

        // Check for mixed indentation across the file
        if has_tabs && has_spaces {
            if let Some(preferred) = &config.preferred_indent {
            let range = Range::single_position(Position::new(0, 0));
            let (severity, message) = if let Some(preferred) = &config.preferred_indent {
                (
                    DiagnosticSeverity::Information,
                    format!("File uses mixed indentation; prefer {}", preferred),
                )
            } else {
                (
                    DiagnosticSeverity::Warning,
                    "File uses mixed indentation (tabs and spaces)".to_string(),
                )
            };
            let diagnostic = Diagnostic::new(
                range,
                severity,
                DiagnosticSeverity::Information,
                DiagnosticCode::InconsistentIndentation,
                message,
                format!("File uses mixed indentation; prefer {}", preferred),
            );
            diagnostics.push(diagnostic);
        }
        }

        diagnostics
    }

39 src/lexer.rs
@ -10,8 +10,8 @@ pub enum LexError {

    InvalidToken { position: usize, text: String },
    #[error("Unterminated string literal starting at position {position}")]
    UnterminatedString { position: usize },
    #[error("Invalid numeric literal at position {position}: {text}")]
    InvalidNumber { position: usize, text: String },
    #[error("Invalid numeric literal: {text}")]
    InvalidNumber { text: String },
}

/// Result type for lexical analysis

@ -356,7 +356,6 @@ impl<'a> NftablesLexer<'a> {

                .any(|c| !c.is_ascii_digit() && c != '.' && c != 'x' && c != 'X')
            {
                return Err(LexError::InvalidNumber {
                    position: span.start,
                    text: text.to_owned(),
                });
            } else {

@ -449,38 +448,4 @@ mod tests {

            panic!("Expected InvalidToken error");
        }
    }

    #[test]
    fn test_invalid_number_with_position() {
        // Test that we can create a proper diagnostic with position information
        use crate::diagnostic::LexicalAnalyzer;

        // Create a source with the same invalid pattern at different positions
        let source = "123abc normal 123abc end";

        // Since normal tokenization splits "123abc" into "123" + "abc",
        // let's test the diagnostic creation directly with a mock error
        let error1 = LexError::InvalidNumber {
            position: 0,
            text: "123abc".to_string(),
        };
        let error2 = LexError::InvalidNumber {
            position: 14,
            text: "123abc".to_string(),
        };

        // Test that diagnostics are created with correct positions
        let diagnostic1 = LexicalAnalyzer::lex_error_to_diagnostic(&error1, source);
        let diagnostic2 = LexicalAnalyzer::lex_error_to_diagnostic(&error2, source);

        // First occurrence should be at position 0
        assert_eq!(diagnostic1.range.start.line, 0);
        assert_eq!(diagnostic1.range.start.character, 0);
        assert_eq!(diagnostic1.message, "Invalid number: '123abc'");

        // Second occurrence should be at position 14 (not 0)
        assert_eq!(diagnostic2.range.start.line, 0);
        assert_eq!(diagnostic2.range.start.character, 14);
        assert_eq!(diagnostic2.message, "Invalid number: '123abc'");
    }
}

12 src/main.rs
@ -351,8 +351,6 @@ fn process_lint_command(

    };

    let is_multiple_files = files.len() > 1;
    let mut has_errors = false;

    for file_path in files {
        if let Err(e) = process_single_file_lint(
            &file_path,

@ -366,18 +364,12 @@ fn process_lint_command(

            is_multiple_files,
        ) {
            eprintln!("Error processing {}: {}", file_path, e);
            has_errors = true;
            if !is_multiple_files {
                return Err(e);
            }
        }
    }

    // Exit with non-zero code if any file had errors
    if has_errors {
        std::process::exit(1);
    }

    Ok(())
}

@ -467,9 +459,9 @@ fn process_single_file_lint(

        println!("{}", diagnostics.to_human_readable());
    }

    // Return error if there are diagnostics errors
    // Exit with non-zero code if there are errors
    if diagnostics.has_errors() {
        return Err(anyhow::anyhow!("Diagnostics found errors in file"));
        std::process::exit(1);
    }

    Ok(())

@ -466,10 +466,6 @@ impl Parser {

        // Check for operators
        while let Some(token) = self.peek() {
            if matches!(token.kind, TokenKind::Newline) {
                self.advance();
                continue;
            }
            // Check for comparison operators
            let operator = match &token.kind {
                TokenKind::Eq => BinaryOperator::Eq,

@ -513,11 +509,10 @@ impl Parser {

            self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;

            // Return a vmap expression with the previous expression as the mapping target
            expr = Expression::Vmap {
            return Ok(Expression::Vmap {
                expr: Some(Box::new(expr)),
                map,
            };
            continue; // allow the outer `while` to detect ==, != … afterwards
            });
        }
        break;
    }