Compare commits


No commits in common. "92735f3eeef9061b03504598f23d6c1347a8a4f0" and "e0f93d03072e15345c9eb068e90d0b1dad8e54be" have entirely different histories.

7 changed files with 169 additions and 601 deletions

README.md

@@ -30,68 +30,26 @@ supported, I cannot guarantee that _everything_ is supported just yet.
 - **Validation** - Syntax checking with precise error locations
 - **Optimization** - Configurable empty line reduction and whitespace control
-### Diagnostics & Analysis
-- **Comprehensive diagnostics** - Syntax, semantic, style, and best practice
-  analysis
-- **Modular analysis** - Run specific diagnostic modules (`lexical`, `syntax`,
-  `style`, `semantic`)
-- **LSP-compatible output** - JSON format for editor integration
-- **Human-readable reports** - Detailed error messages with context and location
-  information
-- **Configurable severity** - Control which diagnostic categories to
-  enable/disable
 ## Usage
-### Formatting
 ```bash
-# Format a specific file (in place)
-nff format /etc/nftables.conf
+# Basic formatting
+nff -f /etc/nftables.conf
-# Format all .nft files in current directory (in place)
-nff format
 # Custom indentation (4 spaces)
-nff format config.nft --indent spaces --spaces 4
+nff -f config.nft --indent spaces --spaces 4
 # Optimize formatting (reduce empty lines)
-nff format config.nft --optimize
+nff -f config.nft --optimize
-# Output to stdout instead of modifying files
-nff format config.nft --stdout
+# Output to file
+nff -f config.nft -o formatted.nft
 # Syntax validation only
-nff format config.nft --check
+nff -f config.nft --check
 # Debug output for development (or debugging)
-nff format config.nft --debug
+nff -f config.nft --debug
-```
-### Linting and Diagnostics
-```bash
-# Run comprehensive diagnostics on a file
-nff lint /etc/nftables.conf
-# Lint all .nft files in current directory
-nff lint
-# JSON output for editor integration
-nff lint config.nft --json
-# Run specific diagnostic modules
-nff lint config.nft --modules syntax,style
-# Available modules: lexical, syntax, style, semantic
-nff lint config.nft --modules semantic
-# Configure diagnostic settings (note: flags are enabled by default)
-nff lint config.nft --style-warnings=false --best-practices=false
-# Debug output with diagnostics
-nff lint config.nft --debug
 ```
 ## Architecture
@@ -111,235 +69,12 @@ graph TD
     AST --> Formatter
     Formatter --> Output
     CST --> Formatter
-    Input --> Diagnostics[Diagnostic System]
-    Diagnostics --> LexAnalyzer[Lexical Analyzer]
-    Diagnostics --> SyntaxAnalyzer[Syntax Analyzer]
-    Diagnostics --> StyleAnalyzer[Style Analyzer]
-    Diagnostics --> SemanticAnalyzer[Semantic Analyzer]
-    LexAnalyzer --> DiagOutput[JSON/Human Output]
-    SyntaxAnalyzer --> DiagOutput
-    StyleAnalyzer --> DiagOutput
-    SemanticAnalyzer --> DiagOutput
 ```
 ## Installation
 The recommended way to install nff is with Nix.
-### Editor Integration
-#### Neovim Setup
-nff can be integrated into Neovim as a diagnostics source for nftables files.
-Here are several setup approaches:
-##### Option 1: Using none-ls
-```lua
-local null_ls = require("null-ls")
-null_ls.setup({
-  sources = {
-    -- nftables diagnostics
-    null_ls.builtins.diagnostics.nff.with({
-      command = "nff",
-      args = { "lint", "$FILENAME", "--json" },
-      format = "json",
-      check_exit_code = false,
-      filetypes = { "nftables" },
-    }),
-    -- nftables formatting
-    null_ls.builtins.formatting.nff.with({
-      command = "nff",
-      args = { "format", "$FILENAME", "--stdout" },
-      filetypes = { "nftables" },
-    }),
-  },
-})
-```
-##### Option 2: Using nvim-lint (recommended)
-```lua
--- ~/.config/nvim/lua/config/lint.lua
-require('lint').linters.nff = {
-  cmd = 'nff',
-  stdin = false,
-  args = { 'lint', '%s', '--json' },
-  stream = 'stdout',
-  ignore_exitcode = true,
-  parser = function(output)
-    local diagnostics = {}
-    local ok, decoded = pcall(vim.fn.json_decode, output)
-    if not ok or not decoded.diagnostics then
-      return diagnostics
-    end
-    for _, diagnostic in ipairs(decoded.diagnostics) do
-      table.insert(diagnostics, {
-        lnum = diagnostic.range.start.line,
-        col = diagnostic.range.start.character,
-        severity = diagnostic.severity == "Error" and vim.diagnostic.severity.ERROR
-          or vim.diagnostic.severity.WARN,
-        message = diagnostic.message,
-        source = "nff",
-        code = diagnostic.code,
-      })
-    end
-    return diagnostics
-  end,
-}
--- Setup linting for nftables files
-vim.api.nvim_create_autocmd({ "BufEnter", "BufWritePost" }, {
-  pattern = "*.nft",
-  callback = function()
-    require("lint").try_lint("nff")
-  end,
-})
-```
-##### Option 3: Custom Lua Function
-For a simple custom solution:
-```lua
--- ~/.config/nvim/lua/nff.lua
-local M = {}
-function M.lint_nftables()
-  local filename = vim.fn.expand('%:p')
-  if vim.bo.filetype ~= 'nftables' then
-    return
-  end
-  local cmd = { 'nff', 'lint', filename, '--json' }
-  vim.fn.jobstart(cmd, {
-    stdout_buffered = true,
-    on_stdout = function(_, data)
-      if data then
-        local output = table.concat(data, '\n')
-        local ok, result = pcall(vim.fn.json_decode, output)
-        if ok and result.diagnostics then
-          local diagnostics = {}
-          for _, diag in ipairs(result.diagnostics) do
-            table.insert(diagnostics, {
-              lnum = diag.range.start.line,
-              col = diag.range.start.character,
-              severity = diag.severity == "Error" and vim.diagnostic.severity.ERROR
-                or vim.diagnostic.severity.WARN,
-              message = diag.message,
-              source = "nff",
-            })
-          end
-          vim.diagnostic.set(vim.api.nvim_create_namespace('nff'), 0, diagnostics)
-        end
-      end
-    end,
-  })
-end
--- Auto-run on save
-vim.api.nvim_create_autocmd("BufWritePost", {
-  pattern = "*.nft",
-  callback = M.lint_nftables,
-})
-return M
-```
-## Diagnostic Categories
-nff provides comprehensive analysis across multiple categories:
-### Syntax Errors
-- Parse errors with precise location information
-- Missing tokens (semicolons, braces, etc.)
-- Unexpected tokens
-- Unterminated strings
-- Invalid numbers
-### Semantic Validation
-- Unknown table families (`inet`, `ip`, `ip6`, etc.)
-- Invalid chain types and hooks
-- Incorrect priority values
-- Missing chain policies
-- Duplicate table/chain names
-- Invalid CIDR notation
-- Invalid port ranges
-### Style Warnings
-- Missing shebang line
-- Inconsistent indentation (mixed tabs/spaces)
-- Trailing whitespace
-- Lines exceeding maximum length (configurable)
-- Excessive empty lines
-- Preferred syntax alternatives
-### Best Practices
-- Chains without explicit policies
-- Rules without actions
-- Overly permissive rules
-- Duplicate or conflicting rules
-- Unused variables or sets
-- Deprecated syntax usage
-- Missing documentation
-- Security risks
-### Performance Hints
-- Inefficient rule ordering
-- Large sets without timeouts
-- Missing counters where beneficial
-## JSON Output Format
-When using `--json`, nff outputs LSP-compatible diagnostics:
-```json
-{
-  "diagnostics": [
-    {
-      "range": {
-        "start": { "line": 5, "character": 10 },
-        "end": { "line": 5, "character": 20 }
-      },
-      "severity": "Error",
-      "code": "NFT001",
-      "source": "nff",
-      "message": "Expected ';' after policy",
-      "related_information": [],
-      "code_actions": [],
-      "tags": []
-    }
-  ],
-  "file_path": "config.nft",
-  "source_text": "..."
-}
-```
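For reference, a minimal sketch of consuming this `--json` report from Rust. This is an editorial illustration, not repository code: it assumes `serde` (with the `derive` feature) and `serde_json` as dependencies, and the structs simply mirror the JSON shape shown above.

```rust
use serde::Deserialize;

// Field names mirror the README's JSON example; fields we don't model
// (related_information, code_actions, tags) are ignored by serde.
#[derive(Debug, Deserialize)]
struct Position {
    line: u32,
    character: u32,
}

#[derive(Debug, Deserialize)]
struct Range {
    start: Position,
    end: Position,
}

#[derive(Debug, Deserialize)]
struct Diagnostic {
    range: Range,
    severity: String,
    code: String,
    message: String,
}

#[derive(Debug, Deserialize)]
struct LintReport {
    diagnostics: Vec<Diagnostic>,
    file_path: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // e.g. `nff lint config.nft --json | this-tool`
    let report: LintReport = serde_json::from_reader(std::io::stdin())?;
    for d in &report.diagnostics {
        // LSP-style positions are zero-based; print one-based for humans.
        println!(
            "{}:{}:{}: {}: {} [{}]",
            report.file_path,
            d.range.start.line + 1,
            d.range.start.character + 1,
            d.severity,
            d.message,
            d.code
        );
    }
    Ok(())
}
```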
-### Diagnostic Codes
-nff uses structured diagnostic codes for categorization:
-- **NFT001-NFT099**: Syntax errors
-- **NFT101-NFT199**: Semantic errors
-- **NFT201-NFT299**: Style warnings
-- **NFT301-NFT399**: Best practice recommendations
-- **NFT401-NFT499**: Performance hints
-- **NFT501-NFT599**: Formatting issues
-- **NFT601-NFT699**: nftables-specific validations
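The ranges above translate directly into a categorization helper. A hedged sketch follows; the `NFTxxx` parsing is an assumption based on the codes listed, and nff's internal representation may differ.

```rust
/// Map an "NFTxxx" code to the category documented above.
/// Returns None for codes outside the listed ranges.
fn category(code: &str) -> Option<&'static str> {
    let n: u16 = code.strip_prefix("NFT")?.parse().ok()?;
    Some(match n {
        1..=99 => "syntax error",
        101..=199 => "semantic error",
        201..=299 => "style warning",
        301..=399 => "best practice recommendation",
        401..=499 => "performance hint",
        501..=599 => "formatting issue",
        601..=699 => "nftables-specific validation",
        _ => return None,
    })
}

fn main() {
    assert_eq!(category("NFT001"), Some("syntax error"));
    assert_eq!(category("NFT301"), Some("best practice recommendation"));
    assert_eq!(category("NFT999"), None);
}
```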
 ## Development
 ### Testing
@@ -461,88 +196,6 @@ table inet protection {
 }
 ```
-## Diagnostics Examples
-### Error Detection
-Input file with issues:
-```nftables
-table inet firewall {
-    chain input {
-        type filter hook input priority 100
-        tcp dport 22 accept
-    }
-}
-```
-Human-readable output:
-```
-Found 2 issues in config.nft:
-config.nft:3:37: error: Expected ';' after policy [NFT001]
-   1: table inet firewall {
-   2:     chain input {
-→  3:         type filter hook input priority 100
-   4:         tcp dport 22 accept
-   5:     }
-config.nft:3:1: warning: Filter chain should have an explicit policy [NFT301]
-   1: table inet firewall {
-   2:     chain input {
-→  3:         type filter hook input priority 100
-   4:         tcp dport 22 accept
-   5:     }
-```
-JSON output:
-```json
-{
-  "diagnostics": [
-    {
-      "range": {
-        "start": { "line": 2, "character": 37 },
-        "end": { "line": 2, "character": 37 }
-      },
-      "severity": "Error",
-      "code": "NFT001",
-      "source": "nff",
-      "message": "Expected ';' after policy"
-    },
-    {
-      "range": {
-        "start": { "line": 2, "character": 0 },
-        "end": { "line": 2, "character": 37 }
-      },
-      "severity": "Warning",
-      "code": "NFT301",
-      "source": "nff",
-      "message": "Filter chain should have an explicit policy"
-    }
-  ],
-  "file_path": "config.nft",
-  "source_text": "..."
-}
-```
-### Style Analysis
-Input with style issues:
-```nftables
-table inet test{chain input{type filter hook input priority 0;policy drop;tcp dport 22 accept;}}
-```
-Style warnings:
-```
-Found 3 issues in style.nft:
-style.nft:1:1: warning: Consider adding a shebang line [NFT201]
-style.nft:1:81: warning: Line too long (98 > 80 characters) [NFT205]
-style.nft:1:16: warning: Missing space after '{' [NFT503]
-```
 ## Contributing
 ### Code Style
@@ -584,17 +237,6 @@ Below are the design goals of nff's architecture.
 - **Memory efficiency**: Streaming token processing where possible
 - **Grammar completeness**: Covers full nftables syntax specification
-### Diagnostic Architecture
-The diagnostic system uses a modular architecture with specialized analyzers:
-- **Modular design**: Each analyzer focuses on specific concerns (lexical,
-  syntax, style, semantic)
-- **Configurable analysis**: Enable/disable specific diagnostic categories
-- **LSP compatibility**: JSON output follows Language Server Protocol standards
-- **Performance optimized**: Concurrent analysis when possible
-- **Extensible**: Easy to add new diagnostic rules and categories
 ## License
 nff is licensed under [MPL v2.0](LICENSE). See license file for more details on
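The modular-analyzer design described in the bullets above maps naturally onto one trait object per diagnostic module. The sketch below is illustrative only: the trait name `AnalyzerModule` does appear in the source diff further down, but this signature and the surrounding types are assumptions, not the repository's definitions.

```rust
// Hypothetical shapes; only the name AnalyzerModule is taken from the diff below.
struct Diagnostic {
    code: String,
    message: String,
}

struct AnalyzerConfig {
    max_line_length: usize,
}

trait AnalyzerModule {
    fn name(&self) -> &'static str;
    fn analyze(&self, source: &str, config: &AnalyzerConfig) -> Vec<Diagnostic>;
}

// A toy style module: flag lines longer than the configured maximum,
// roughly what the NFT205 "line too long" warning above describes.
struct LineLengthAnalyzer;

impl AnalyzerModule for LineLengthAnalyzer {
    fn name(&self) -> &'static str {
        "style"
    }

    fn analyze(&self, source: &str, config: &AnalyzerConfig) -> Vec<Diagnostic> {
        source
            .lines()
            .enumerate()
            .filter(|(_, line)| line.len() > config.max_line_length)
            .map(|(i, line)| Diagnostic {
                code: "NFT205".to_string(),
                message: format!(
                    "line {}: {} > {} characters",
                    i + 1,
                    line.len(),
                    config.max_line_length
                ),
            })
            .collect()
    }
}

fn main() {
    let config = AnalyzerConfig { max_line_length: 80 };
    // Modules are interchangeable behind the trait, so enabling or
    // disabling a category is just including or omitting a box here.
    let modules: Vec<Box<dyn AnalyzerModule>> = vec![Box::new(LineLengthAnalyzer)];
    for m in &modules {
        for d in m.analyze("short line\n", &config) {
            println!("[{}] {} {}", m.name(), d.code, d.message);
        }
    }
}
```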


@@ -4,8 +4,6 @@
   rustfmt,
   clippy,
   cargo,
-  cargo-machete,
-  cargo-nextest,
   rustPlatform,
 }:
 mkShell {
@@ -15,8 +13,6 @@ mkShell {
     rustfmt
     clippy
     cargo
-    cargo-machete
-    cargo-nextest
   ];
   RUST_SRC_PATH = "${rustPlatform.rustLibSrc}";


@@ -13,150 +13,150 @@ use thiserror::Error;
 pub enum SyntaxKind {
     // Root and containers
     Root = 0,
-    Table = 1,
-    Chain = 2,
-    Rule = 3,
-    Set = 4,
-    Map = 5,
-    Element = 6,
+    Table,
+    Chain,
+    Rule,
+    Set,
+    Map,
+    Element,
     // Expressions
-    Expression = 7,
-    BinaryExpr = 8,
-    UnaryExpr = 9,
-    CallExpr = 10,
-    SetExpr = 11,
-    RangeExpr = 12,
+    Expression,
+    BinaryExpr,
+    UnaryExpr,
+    CallExpr,
+    SetExpr,
+    RangeExpr,
     // Statements
-    Statement = 13,
-    IncludeStmt = 14,
-    DefineStmt = 15,
-    FlushStmt = 16,
-    AddStmt = 17,
-    DeleteStmt = 18,
+    Statement,
+    IncludeStmt,
+    DefineStmt,
+    FlushStmt,
+    AddStmt,
+    DeleteStmt,
     // Literals and identifiers
-    Identifier = 19,
-    StringLiteral = 20,
-    NumberLiteral = 21,
-    IpAddress = 22,
-    Ipv6Address = 23,
-    MacAddress = 24,
+    Identifier,
+    StringLiteral,
+    NumberLiteral,
+    IpAddress,
+    Ipv6Address,
+    MacAddress,
     // Keywords
-    TableKw = 25,
-    ChainKw = 26,
-    RuleKw = 27,
-    SetKw = 28,
-    MapKw = 29,
-    ElementKw = 30,
-    IncludeKw = 31,
-    DefineKw = 32,
-    FlushKw = 33,
-    AddKw = 34,
-    DeleteKw = 35,
-    InsertKw = 36,
-    ReplaceKw = 37,
+    TableKw,
+    ChainKw,
+    RuleKw,
+    SetKw,
+    MapKw,
+    ElementKw,
+    IncludeKw,
+    DefineKw,
+    FlushKw,
+    AddKw,
+    DeleteKw,
+    InsertKw,
+    ReplaceKw,
     // Chain types and hooks
-    FilterKw = 38,
-    NatKw = 39,
-    RouteKw = 40,
-    InputKw = 41,
-    OutputKw = 42,
-    ForwardKw = 43,
-    PreroutingKw = 44,
-    PostroutingKw = 45,
+    FilterKw,
+    NatKw,
+    RouteKw,
+    InputKw,
+    OutputKw,
+    ForwardKw,
+    PreroutingKw,
+    PostroutingKw,
     // Protocols and families
-    IpKw = 46,
-    Ip6Kw = 47,
-    InetKw = 48,
-    ArpKw = 49,
-    BridgeKw = 50,
-    NetdevKw = 51,
-    TcpKw = 52,
-    UdpKw = 53,
-    IcmpKw = 54,
-    Icmpv6Kw = 55,
+    IpKw,
+    Ip6Kw,
+    InetKw,
+    ArpKw,
+    BridgeKw,
+    NetdevKw,
+    TcpKw,
+    UdpKw,
+    IcmpKw,
+    Icmpv6Kw,
     // Match keywords
-    SportKw = 56,
-    DportKw = 57,
-    SaddrKw = 58,
-    DaddrKw = 59,
-    ProtocolKw = 60,
-    NexthdrKw = 61,
-    TypeKw = 62,
-    HookKw = 63,
-    PriorityKw = 64,
-    PolicyKw = 65,
-    IifnameKw = 66,
-    OifnameKw = 67,
-    CtKw = 68,
-    StateKw = 69,
+    SportKw,
+    DportKw,
+    SaddrKw,
+    DaddrKw,
+    ProtocolKw,
+    NexthdrKw,
+    TypeKw,
+    HookKw,
+    PriorityKw,
+    PolicyKw,
+    IifnameKw,
+    OifnameKw,
+    CtKw,
+    StateKw,
     // Actions
-    AcceptKw = 70,
-    DropKw = 71,
-    RejectKw = 72,
-    ReturnKw = 73,
-    JumpKw = 74,
-    GotoKw = 75,
-    ContinueKw = 76,
-    LogKw = 77,
-    CommentKw = 78,
+    AcceptKw,
+    DropKw,
+    RejectKw,
+    ReturnKw,
+    JumpKw,
+    GotoKw,
+    ContinueKw,
+    LogKw,
+    CommentKw,
     // States
-    EstablishedKw = 79,
-    RelatedKw = 80,
-    NewKw = 81,
-    InvalidKw = 82,
-    // Operators
-    EqOp = 83,
-    NeOp = 84,
-    LeOp = 85,
-    GeOp = 86,
-    LtOp = 87,
-    GtOp = 88,
-    // Punctuation
-    LeftBrace = 89,
-    RightBrace = 90,
-    LeftParen = 91,
-    RightParen = 92,
-    LeftBracket = 93,
-    RightBracket = 94,
-    Comma = 95,
-    Semicolon = 96,
-    Colon = 97,
-    Assign = 98,
-    Dash = 99,
-    Slash = 100,
-    Dot = 101,
-    // Trivia
-    Whitespace = 102,
-    Newline = 103,
-    Comment = 104,
-    Shebang = 105,
-    // Error recovery
-    Error = 106,
+    EstablishedKw,
+    RelatedKw,
+    NewKw,
+    InvalidKw,
     // Additional protocol keywords
-    VmapKw = 107,
-    NdRouterAdvertKw = 108,
-    NdNeighborSolicitKw = 109,
-    NdNeighborAdvertKw = 110,
-    EchoRequestKw = 111,
-    DestUnreachableKw = 112,
-    RouterAdvertisementKw = 113,
-    TimeExceededKw = 114,
-    ParameterProblemKw = 115,
-    PacketTooBigKw = 116,
+    VmapKw,
+    NdRouterAdvertKw,
+    NdNeighborSolicitKw,
+    NdNeighborAdvertKw,
+    EchoRequestKw,
+    DestUnreachableKw,
+    RouterAdvertisementKw,
+    TimeExceededKw,
+    ParameterProblemKw,
+    PacketTooBigKw,
+    // Operators
+    EqOp,
+    NeOp,
+    LeOp,
+    GeOp,
+    LtOp,
+    GtOp,
+    // Punctuation
+    LeftBrace,
+    RightBrace,
+    LeftParen,
+    RightParen,
+    LeftBracket,
+    RightBracket,
+    Comma,
+    Semicolon,
+    Colon,
+    Assign,
+    Dash,
+    Slash,
+    Dot,
+    // Trivia
+    Whitespace,
+    Newline,
+    Comment,
+    Shebang,
+    // Error recovery
+    Error,
 }
 impl From<TokenKind> for SyntaxKind {
@@ -324,13 +324,7 @@ impl SyntaxKind {
     }
     pub fn from_raw(raw: RawSyntaxKind) -> Self {
-        match raw.0 {
-            0 => SyntaxKind::Root,
-            1 => SyntaxKind::Table,
-            // ... other variants ...
-            116 => SyntaxKind::PacketTooBigKw,
-            _ => SyntaxKind::Error, // Fallback to Error for invalid values
-        }
+        unsafe { std::mem::transmute(raw.0 as u16) }
     }
 }
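A note on the `from_raw` hunk above: a bare `transmute` is sound only if every incoming raw value is a valid discriminant. The bounds-checked sketch below keeps the old fallback-to-`Error` behavior; it assumes `SyntaxKind` is `#[repr(u16)]` with contiguous discriminants and `Error` as the last variant, neither of which is visible in this diff.

```rust
// Self-contained miniature; the real enum has ~117 variants.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u16)]
enum SyntaxKind {
    Root = 0,
    Table,
    // ... (remaining variants elided) ...
    Error, // must stay the last variant for the bounds check below
}

struct RawSyntaxKind(u32);

fn from_raw(raw: RawSyntaxKind) -> SyntaxKind {
    let value = raw.0 as u16;
    if value <= SyntaxKind::Error as u16 {
        // SAFETY: repr(u16) with contiguous discriminants 0..=Error,
        // so every value in this range names a valid variant.
        unsafe { std::mem::transmute::<u16, SyntaxKind>(value) }
    } else {
        // Out-of-range input degrades gracefully instead of being UB.
        SyntaxKind::Error
    }
}

fn main() {
    assert_eq!(from_raw(RawSyntaxKind(1)), SyntaxKind::Table);
    assert_eq!(from_raw(RawSyntaxKind(9_999)), SyntaxKind::Error);
}
```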


@@ -453,7 +453,7 @@ impl AnalyzerModule for LexicalAnalyzer {
 }
 impl LexicalAnalyzer {
-    pub fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
+    fn lex_error_to_diagnostic(error: &LexError, source: &str) -> Diagnostic {
         match error {
             LexError::InvalidToken { position, text } => {
                 let pos = Position::from_text_size(TextSize::from(*position as u32), source);
@@ -478,8 +478,9 @@ impl LexicalAnalyzer {
                     "Unterminated string literal".to_string(),
                 )
             }
-            LexError::InvalidNumber { position, text } => {
-                let start_pos = Position::from_text_size(TextSize::from(*position as u32), source);
+            LexError::InvalidNumber { text } => {
+                if let Some(pos) = source.find(text) {
+                    let start_pos = Position::from_text_size(TextSize::from(pos as u32), source);
                     let end_pos =
                         Position::new(start_pos.line, start_pos.character + text.len() as u32);
                     let range = Range::new(start_pos, end_pos);
@@ -489,6 +490,15 @@
                         DiagnosticCode::InvalidNumber,
                         format!("Invalid number: '{}'", text),
                     )
+                } else {
+                    let range = Range::single_position(Position::new(0, 0));
+                    Diagnostic::new(
+                        range,
+                        DiagnosticSeverity::Error,
+                        DiagnosticCode::InvalidNumber,
+                        format!("Invalid number: '{}'", text),
+                    )
+                }
             }
         }
     }
@@ -711,23 +721,6 @@ impl StyleAnalyzer {
         }
     }
-        // Check for trailing empty lines at the end of the file
-        if empty_count > config.max_empty_lines {
-            let start = Position::new(empty_start as u32, 0);
-            let end = Position::new((empty_start + empty_count - 1) as u32, 0);
-            let range = Range::new(start, end);
-            let diagnostic = Diagnostic::new(
-                range,
-                DiagnosticSeverity::Warning,
-                DiagnosticCode::TooManyEmptyLines,
-                format!(
-                    "Too many consecutive empty lines at end of file ({} > {})",
-                    empty_count, config.max_empty_lines
-                ),
-            );
-            diagnostics.push(diagnostic);
-        }
         diagnostics
     }
@@ -773,26 +766,17 @@
         // Check for mixed indentation across the file
         if has_tabs && has_spaces {
+            if let Some(preferred) = &config.preferred_indent {
                 let range = Range::single_position(Position::new(0, 0));
-            let (severity, message) = if let Some(preferred) = &config.preferred_indent {
-                (
-                    DiagnosticSeverity::Information,
-                    format!("File uses mixed indentation; prefer {}", preferred),
-                )
-            } else {
-                (
-                    DiagnosticSeverity::Warning,
-                    "File uses mixed indentation (tabs and spaces)".to_string(),
-                )
-            };
                 let diagnostic = Diagnostic::new(
                     range,
-                severity,
+                    DiagnosticSeverity::Information,
                     DiagnosticCode::InconsistentIndentation,
-                message,
+                    format!("File uses mixed indentation; prefer {}", preferred),
                 );
                 diagnostics.push(diagnostic);
+            }
         }
         diagnostics
     }
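Both branches of the `InvalidNumber` arm above rely on `Position::from_text_size` to turn a byte offset into a line/column pair; note also that recovering the offset via `source.find(text)` pins repeated invalid tokens to their first occurrence, which is why the old variant carried an explicit `position`. For readers following along, a rough sketch of the offset-to-position conversion (illustrative only; the repository's actual implementation is not shown in this diff, and it assumes the offset falls on a UTF-8 character boundary, as lexer token offsets do):

```rust
#[derive(Debug, PartialEq)]
struct Position {
    line: u32,
    character: u32,
}

// Count newlines before `offset` for the line, and bytes since the
// last newline for the column (both zero-based, LSP style).
fn position_from_offset(offset: usize, source: &str) -> Position {
    let clamped = offset.min(source.len());
    let before = &source[..clamped];
    let line = before.matches('\n').count() as u32;
    let character = before
        .rfind('\n')
        .map(|nl| clamped - nl - 1)
        .unwrap_or(clamped) as u32;
    Position { line, character }
}

fn main() {
    let src = "table inet f {\n  chain input {\n";
    assert_eq!(position_from_offset(0, src), Position { line: 0, character: 0 });
    assert_eq!(position_from_offset(17, src), Position { line: 1, character: 2 });
}
```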


@@ -10,8 +10,8 @@ pub enum LexError {
     InvalidToken { position: usize, text: String },
     #[error("Unterminated string literal starting at position {position}")]
     UnterminatedString { position: usize },
-    #[error("Invalid numeric literal at position {position}: {text}")]
-    InvalidNumber { position: usize, text: String },
+    #[error("Invalid numeric literal: {text}")]
+    InvalidNumber { text: String },
 }
 /// Result type for lexical analysis
@@ -356,7 +356,6 @@ impl<'a> NftablesLexer<'a> {
             .any(|c| !c.is_ascii_digit() && c != '.' && c != 'x' && c != 'X')
         {
             return Err(LexError::InvalidNumber {
-                position: span.start,
                 text: text.to_owned(),
             });
         } else {
@@ -449,38 +448,4 @@ mod tests {
             panic!("Expected InvalidToken error");
         }
     }
-    #[test]
-    fn test_invalid_number_with_position() {
-        // Test that we can create a proper diagnostic with position information
-        use crate::diagnostic::LexicalAnalyzer;
-        // Create a source with the same invalid pattern at different positions
-        let source = "123abc normal 123abc end";
-        // Since normal tokenization splits "123abc" into "123" + "abc",
-        // let's test the diagnostic creation directly with a mock error
-        let error1 = LexError::InvalidNumber {
-            position: 0,
-            text: "123abc".to_string(),
-        };
-        let error2 = LexError::InvalidNumber {
-            position: 14,
-            text: "123abc".to_string(),
-        };
-        // Test that diagnostics are created with correct positions
-        let diagnostic1 = LexicalAnalyzer::lex_error_to_diagnostic(&error1, source);
-        let diagnostic2 = LexicalAnalyzer::lex_error_to_diagnostic(&error2, source);
-        // First occurrence should be at position 0
-        assert_eq!(diagnostic1.range.start.line, 0);
-        assert_eq!(diagnostic1.range.start.character, 0);
-        assert_eq!(diagnostic1.message, "Invalid number: '123abc'");
-        // Second occurrence should be at position 14 (not 0)
-        assert_eq!(diagnostic2.range.start.line, 0);
-        assert_eq!(diagnostic2.range.start.character, 14);
-        assert_eq!(diagnostic2.message, "Invalid number: '123abc'");
-    }
 }


@@ -351,8 +351,6 @@ fn process_lint_command(
     };
     let is_multiple_files = files.len() > 1;
-    let mut has_errors = false;
     for file_path in files {
         if let Err(e) = process_single_file_lint(
             &file_path,
@@ -366,18 +364,12 @@
             is_multiple_files,
         ) {
             eprintln!("Error processing {}: {}", file_path, e);
-            has_errors = true;
             if !is_multiple_files {
                 return Err(e);
             }
         }
     }
-    // Exit with non-zero code if any file had errors
-    if has_errors {
-        std::process::exit(1);
-    }
     Ok(())
 }
@@ -467,9 +459,9 @@ fn process_single_file_lint(
         println!("{}", diagnostics.to_human_readable());
     }
-    // Return error if there are diagnostics errors
+    // Exit with non-zero code if there are errors
     if diagnostics.has_errors() {
-        return Err(anyhow::anyhow!("Diagnostics found errors in file"));
+        std::process::exit(1);
     }
     Ok(())


@@ -466,10 +466,6 @@ impl Parser {
         // Check for operators
         while let Some(token) = self.peek() {
-            if matches!(token.kind, TokenKind::Newline) {
-                self.advance();
-                continue;
-            }
             // Check for comparison operators
             let operator = match &token.kind {
                 TokenKind::Eq => BinaryOperator::Eq,
@@ -513,11 +509,10 @@
                 self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
                 // Return a vmap expression with the previous expression as the mapping target
-                expr = Expression::Vmap {
+                return Ok(Expression::Vmap {
                     expr: Some(Box::new(expr)),
                     map,
-                };
-                continue; // allow the outer `while` to detect ==, != … afterwards
+                });
             }
             break;
         }