nff: add vmap expression support and improve diagnostics

This commit is contained in:
raf 2025-06-02 08:59:45 +03:00
commit c4a71f2e85
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
6 changed files with 440 additions and 3 deletions

View file

@ -125,6 +125,12 @@ pub enum Expression {
// Set expressions
Set(Vec<Expression>),
// Vmap expressions (value maps)
Vmap {
expr: Box<Expression>,
map: Vec<(Expression, Expression)>,
},
// Range expressions
Range {
start: Box<Expression>,

View file

@ -114,6 +114,18 @@ pub enum SyntaxKind {
NewKw,
InvalidKw,
// Additional protocol keywords
VmapKw,
NdRouterAdvertKw,
NdNeighborSolicitKw,
NdNeighborAdvertKw,
EchoRequestKw,
DestUnreachableKw,
RouterAdvertisementKw,
TimeExceededKw,
ParameterProblemKw,
PacketTooBigKw,
// Operators
EqOp,
NeOp,
@ -215,6 +227,17 @@ impl From<TokenKind> for SyntaxKind {
TokenKind::New => SyntaxKind::NewKw,
TokenKind::Invalid => SyntaxKind::InvalidKw,
TokenKind::Vmap => SyntaxKind::VmapKw,
TokenKind::NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
TokenKind::NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
TokenKind::NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
TokenKind::EchoRequest => SyntaxKind::EchoRequestKw,
TokenKind::DestUnreachable => SyntaxKind::DestUnreachableKw,
TokenKind::RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
TokenKind::TimeExceeded => SyntaxKind::TimeExceededKw,
TokenKind::ParameterProblem => SyntaxKind::ParameterProblemKw,
TokenKind::PacketTooBig => SyntaxKind::PacketTooBigKw,
TokenKind::Eq => SyntaxKind::EqOp,
TokenKind::Ne => SyntaxKind::NeOp,
TokenKind::Le => SyntaxKind::LeOp,

View file

@ -129,6 +129,28 @@ pub enum TokenKind {
#[token("new")]
New,
// Additional protocol keywords
#[token("vmap")]
Vmap,
#[token("nd-router-advert")]
NdRouterAdvert,
#[token("nd-neighbor-solicit")]
NdNeighborSolicit,
#[token("nd-neighbor-advert")]
NdNeighborAdvert,
#[token("echo-request")]
EchoRequest,
#[token("destination-unreachable")]
DestUnreachable,
#[token("router-advertisement")]
RouterAdvertisement,
#[token("time-exceeded")]
TimeExceeded,
#[token("parameter-problem")]
ParameterProblem,
#[token("packet-too-big")]
PacketTooBig,
// Actions
#[token("accept")]
Accept,

View file

@ -15,7 +15,7 @@ use thiserror::Error;
use crate::cst::CstBuilder;
use crate::diagnostic::{DiagnosticAnalyzer, DiagnosticConfig};
use crate::lexer::NftablesLexer;
use crate::lexer::{NftablesLexer, Token, TokenKind};
use crate::parser::Parser as NftablesParser;
use crate::syntax::{FormatConfig, IndentStyle, NftablesFormatter};
@ -27,6 +27,27 @@ enum FormatterError {
InvalidFile(String),
#[error("Parse error: {0}")]
ParseError(String),
#[error("Syntax error at line {line}, column {column}: {message}")]
SyntaxError {
line: usize,
column: usize,
message: String,
suggestion: Option<String>,
},
#[error("Unsupported nftables syntax at line {line}, column {column}: {feature}")]
UnsupportedSyntax {
line: usize,
column: usize,
feature: String,
suggestion: Option<String>,
},
#[error("Invalid nftables syntax at line {line}, column {column}: {message}")]
InvalidSyntax {
line: usize,
column: usize,
message: String,
suggestion: Option<String>,
},
#[error("IO error: {0}")]
Io(#[from] io::Error),
}
@ -268,7 +289,7 @@ fn process_single_file_format(
let mut parser = NftablesParser::new(tokens.clone());
parser
.parse()
.map_err(|e| FormatterError::ParseError(e.to_string()))?
.map_err(|e| analyze_parse_error(&source, &tokens, &e.to_string()))?
};
if debug {
@ -446,6 +467,277 @@ fn process_single_file_lint(
Ok(())
}
/// Intelligent error analysis to categorize parse errors and provide location information.
///
/// Inspects the parser's error message together with the token stream: when the
/// message looks like a syntax complaint and the lexer produced an error token,
/// a located, categorized error (unsupported / invalid / generic syntax) is
/// built; otherwise the original message passes through as a plain `ParseError`.
fn analyze_parse_error(source: &str, tokens: &[Token], error: &str) -> FormatterError {
    // Only messages that look like parser complaints get the detailed treatment.
    if error.contains("unexpected token") || error.contains("expected") {
        // Anchor the diagnostic on the first lexer error token, if any.
        if let Some(error_token) = find_error_token(tokens) {
            let (line, column) = position_from_range(&error_token.range, source);
            // Split into lines only on this path; the fallback below never needs
            // them, so the allocation is skipped for generic parse errors.
            let lines: Vec<&str> = source.lines().collect();
            return match categorize_syntax_error(error_token, source, &lines) {
                ErrorCategory::UnsupportedSyntax { feature, suggestion } => {
                    FormatterError::UnsupportedSyntax {
                        line,
                        column,
                        feature,
                        suggestion,
                    }
                }
                ErrorCategory::InvalidSyntax { message, suggestion } => {
                    FormatterError::InvalidSyntax {
                        line,
                        column,
                        message,
                        suggestion,
                    }
                }
                ErrorCategory::SyntaxError { message, suggestion } => {
                    FormatterError::SyntaxError {
                        line,
                        column,
                        message,
                        suggestion,
                    }
                }
            };
        }
    }
    // Fallback: no recognizable pattern, or no error token to anchor a location.
    FormatterError::ParseError(error.to_string())
}
/// Classification of a parse failure, used by `analyze_parse_error` to pick the
/// matching `FormatterError` variant once a source location has been resolved.
#[derive(Debug)]
enum ErrorCategory {
    /// The input uses an nftables feature this tool does not (fully) support.
    UnsupportedSyntax {
        feature: String,
        suggestion: Option<String>,
    },
    /// The input stays within supported features but is malformed
    /// (bad CIDR, wrong range operator, unbalanced braces, ...).
    InvalidSyntax {
        message: String,
        suggestion: Option<String>,
    },
    /// Generic catch-all for an unexpected token, possibly with a typo hint.
    SyntaxError {
        message: String,
        suggestion: Option<String>,
    },
}
/// Find the first error token in the token stream
fn find_error_token(tokens: &[Token]) -> Option<&Token> {
tokens
.iter()
.find(|token| matches!(token.kind, TokenKind::Error))
}
/// Convert a `TextRange` start offset into a 1-based (line, column) position.
///
/// Walks the source character by character, so CRLF line endings are counted at
/// their real byte width (the previous `lines()`-based version added only one
/// byte per line terminator and drifted on `\r\n` files), and an offset past the
/// end of the source clamps to the final position instead of resetting to (1, 1).
fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize) {
    let start_offset: usize = range.start().into();
    let mut line = 1;
    let mut column = 1;
    for (byte_idx, ch) in source.char_indices() {
        if byte_idx >= start_offset {
            break;
        }
        if ch == '\n' {
            line += 1;
            column = 1;
        } else {
            column += 1;
        }
    }
    (line, column)
}
/// Categorize a syntax error from the offending token and its surrounding line.
///
/// Checks, in order: unsupported nftables features, malformed-but-supported
/// syntax, and finally a generic unexpected-token error with a typo hint.
fn categorize_syntax_error(token: &Token, source: &str, lines: &[&str]) -> ErrorCategory {
    let text = &token.text;
    let (line_no, _col) = position_from_range(&token.range, source);
    // 1-based line number -> 0-based index; out-of-range falls back to "".
    let context: &str = lines.get(line_no.saturating_sub(1)).copied().unwrap_or("");

    if is_unsupported_feature(text, context) {
        let (feature, suggestion) = classify_unsupported_feature(text, context);
        ErrorCategory::UnsupportedSyntax {
            feature,
            suggestion,
        }
    } else if is_invalid_syntax(text, context) {
        let (message, suggestion) = classify_invalid_syntax(text, context);
        ErrorCategory::InvalidSyntax {
            message,
            suggestion,
        }
    } else {
        ErrorCategory::SyntaxError {
            message: format!("Unexpected token '{}'", text),
            suggestion: suggest_correction(text, context),
        }
    }
}
/// Check if the token represents an unsupported nftables feature.
///
/// The token itself must equal a keyword exactly, and the surrounding line is
/// matched on whole whitespace-separated words; the previous substring check
/// misfired on words that merely contain a keyword (e.g. "counter" inside
/// "encounter", "meta" inside "metadata").
fn is_unsupported_feature(token_text: &str, line_content: &str) -> bool {
    // Advanced nftables features that might not be fully supported yet.
    const UNSUPPORTED_KEYWORDS: &[&str] = &[
        "quota", "limit", "counter", "meter", "socket", "fib", "rt", "ipsec", "tunnel", "comp",
        "dccp", "sctp", "gre", "esp", "ah", "vlan", "arp", "rateest", "osf", "netdev", "meta",
        "exthdr", "payload", "lookup", "dynset", "flow", "hash", "jhash", "symhash", "crc32",
    ];
    UNSUPPORTED_KEYWORDS.iter().any(|&keyword| {
        token_text == keyword || line_content.split_whitespace().any(|word| word == keyword)
    })
}
/// Check if the syntax is invalid (malformed but within supported features).
///
/// Fix over the previous version: `::` is legal inside IPv6 addresses (e.g.
/// `::1`), so it is only flagged when the token cannot plausibly be part of an
/// address; before, every IPv6 literal was misreported as a mistake.
fn is_invalid_syntax(token_text: &str, line_content: &str) -> bool {
    // ".." is never valid nftables syntax (ranges use '-').
    if token_text.contains("..") {
        return true;
    }
    // Flag "::" only when the token contains characters that rule out an
    // IPv6 address or CIDR (hex digits, ':', '.', '/').
    if token_text.contains("::")
        && !token_text
            .chars()
            .all(|c| c.is_ascii_hexdigit() || matches!(c, ':' | '.' | '/'))
    {
        return true;
    }
    // Check for malformed addresses or ranges.
    if token_text.contains('/') && !is_valid_cidr(token_text) {
        return true;
    }
    // Check for unbalanced brackets/braces on the error line.
    let open_braces = line_content.matches('{').count();
    let close_braces = line_content.matches('}').count();
    open_braces != close_braces
}
/// Classify an unsupported feature and pair it with a migration suggestion.
///
/// Token-based checks run first (quota, limit, counter), then a line-context
/// check for `meta`; anything else is reported as a generic advanced feature.
fn classify_unsupported_feature(token_text: &str, line_content: &str) -> (String, Option<String>) {
    if token_text.contains("quota") {
        return (
            "quota management".to_string(),
            Some("Use explicit rule counting instead".to_string()),
        );
    }
    if token_text.contains("limit") {
        return (
            "rate limiting".to_string(),
            Some("Consider using simpler rule-based rate limiting".to_string()),
        );
    }
    if token_text.contains("counter") {
        return (
            "packet counters".to_string(),
            Some("Use rule-level statistics instead".to_string()),
        );
    }
    if line_content.contains("meta") {
        return (
            "meta expressions".to_string(),
            Some("Use explicit protocol matching instead".to_string()),
        );
    }
    (format!("advanced feature '{}'", token_text), None)
}
/// Classify invalid syntax and provide a corrective suggestion.
///
/// Checks, in order: malformed CIDR, the invalid ".." range operator, an
/// unmatched opening brace on the line, then a generic malformed-token report.
fn classify_invalid_syntax(token_text: &str, line_content: &str) -> (String, Option<String>) {
    let bad_cidr = token_text.contains('/') && !is_valid_cidr(token_text);
    if bad_cidr {
        (
            "Invalid CIDR notation".to_string(),
            Some("Use format like '192.168.1.0/24' or '::1/128'".to_string()),
        )
    } else if token_text.contains("..") {
        (
            "Invalid range operator".to_string(),
            Some("Use '-' for ranges like '1000-2000'".to_string()),
        )
    } else if line_content.contains('{') && !line_content.contains('}') {
        (
            "Unmatched opening brace".to_string(),
            Some("Ensure all '{' have matching '}'".to_string()),
        )
    } else {
        (
            format!("Malformed token '{}'", token_text),
            Some("Check nftables syntax documentation".to_string()),
        )
    }
}
/// Suggest a correction for common typos, falling back to context-based hints.
fn suggest_correction(token_text: &str, line_content: &str) -> Option<String> {
    // Known misspellings paired with their intended keyword.
    const TYPO_FIXES: &[(&str, &str)] = &[
        ("tabel", "table"),
        ("cahin", "chain"),
        ("accpet", "accept"),
        ("rejct", "reject"),
        ("prtocol", "protocol"),
        ("addres", "address"),
        ("pririty", "priority"),
        ("poicy", "policy"),
    ];
    if let Some((_, fix)) = TYPO_FIXES.iter().find(|(typo, _)| token_text.contains(typo)) {
        return Some(format!("Did you mean '{}'?", fix));
    }

    // A chain declaration ("type ... hook ...") should name a known chain type.
    let looks_like_chain_decl = line_content.contains("type") && line_content.contains("hook");
    let names_known_type = ["filter", "nat", "route"]
        .iter()
        .any(|t| line_content.contains(t));
    if looks_like_chain_decl && !names_known_type {
        return Some("Chain type should be 'filter', 'nat', or 'route'".to_string());
    }
    None
}
/// Validate CIDR notation ("addr/prefix").
///
/// The address part is parsed with `std::net` (the previous version accepted
/// any non-':' text as IPv4, so "garbage/24" validated), the prefix must be
/// plain digits (rejecting signs like "+24" that `u8::from_str` tolerates),
/// and the prefix length is bounded at 32 for IPv4 and 128 for IPv6.
fn is_valid_cidr(text: &str) -> bool {
    let (addr, prefix) = match text.split_once('/') {
        Some(parts) => parts,
        None => return false,
    };
    // Digits only: "parse::<u8>" alone would also accept "+24".
    if prefix.is_empty() || !prefix.bytes().all(|b| b.is_ascii_digit()) {
        return false;
    }
    let prefix_len: u8 = match prefix.parse() {
        Ok(n) => n,
        Err(_) => return false, // overflow, e.g. "/300"
    };
    if addr.parse::<std::net::Ipv6Addr>().is_ok() {
        prefix_len <= 128
    } else if addr.parse::<std::net::Ipv4Addr>().is_ok() {
        prefix_len <= 32
    } else {
        false // address part is neither a valid IPv4 nor IPv6 literal
    }
}
fn main() -> Result<()> {
let args = Args::parse();

View file

@ -465,7 +465,9 @@ impl Parser {
fn parse_comparison_expression(&mut self) -> Result<Expression> {
let mut expr = self.parse_range_expression()?;
// Check for operators
while let Some(token) = self.peek() {
// Check for comparison operators
let operator = match &token.kind {
TokenKind::Eq => BinaryOperator::Eq,
TokenKind::Ne => BinaryOperator::Ne,
@ -473,7 +475,48 @@ impl Parser {
TokenKind::Le => BinaryOperator::Le,
TokenKind::Gt => BinaryOperator::Gt,
TokenKind::Ge => BinaryOperator::Ge,
_ => break,
_ => {
// Check for vmap after an expression
if matches!(&token.kind, TokenKind::Vmap) {
self.advance(); // consume 'vmap'
// Parse the map contents
self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;
let mut map = Vec::new();
while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
// Skip commas and newlines
if self.current_token_is(&TokenKind::Comma)
|| self.current_token_is(&TokenKind::Newline)
{
self.advance();
continue;
}
// Parse key
let key = self.parse_expression()?;
// Parse colon separator
self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;
// Parse value
let value = self.parse_expression()?;
// Add the key-value pair to the map
map.push((key, value));
}
self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
// Return a vmap expression with the previous expression as the mapping target
return Ok(Expression::Vmap {
expr: Box::new(expr),
map,
});
}
break;
}
};
self.advance(); // consume operator
@ -753,6 +796,43 @@ impl Parser {
let addr = self.advance().unwrap().text.clone();
Ok(Expression::MacAddress(addr))
}
Some(TokenKind::Vmap) => {
self.advance(); // consume 'vmap'
// Parse the map contents
self.consume(TokenKind::LeftBrace, "Expected '{' after vmap")?;
let mut map = Vec::new();
while !self.current_token_is(&TokenKind::RightBrace) && !self.is_at_end() {
// Skip commas and newlines
if self.current_token_is(&TokenKind::Comma)
|| self.current_token_is(&TokenKind::Newline)
{
self.advance();
continue;
}
// Parse key
let key = self.parse_expression()?;
// Parse colon separator
self.consume(TokenKind::Colon, "Expected ':' in vmap key-value pair")?;
// Parse value
let value = self.parse_expression()?;
// Add the key-value pair to the map
map.push((key, value));
}
self.consume(TokenKind::RightBrace, "Expected '}' to close vmap")?;
// The expression that came before "vmap" is the expr being mapped
let expr = Box::new(Expression::Identifier("dummy".to_string())); // This will be replaced in post-processing
Ok(Expression::Vmap { expr, map })
}
Some(TokenKind::LeftBrace) => {
self.advance(); // consume '{'
let mut elements = Vec::new();

View file

@ -292,6 +292,20 @@ impl NftablesFormatter {
output.push('-');
self.format_expression(output, end);
}
Expression::Vmap { expr, map } => {
self.format_expression(output, expr);
output.push_str(" vmap { ");
for (i, (key, value)) in map.iter().enumerate() {
if i > 0 {
output.push_str(", ");
}
self.format_expression(output, key);
output.push_str(" : ");
self.format_expression(output, value);
}
output.push_str(" }");
}
}
}
}