nff: add vmap expression support and improve diagnostics

This commit is contained in:
raf 2025-06-02 08:59:45 +03:00
commit c4a71f2e85
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
6 changed files with 440 additions and 3 deletions

View file

@@ -15,7 +15,7 @@ use thiserror::Error;
use crate::cst::CstBuilder;
use crate::diagnostic::{DiagnosticAnalyzer, DiagnosticConfig};
use crate::lexer::NftablesLexer;
use crate::lexer::{NftablesLexer, Token, TokenKind};
use crate::parser::Parser as NftablesParser;
use crate::syntax::{FormatConfig, IndentStyle, NftablesFormatter};
@@ -27,6 +27,27 @@ enum FormatterError {
InvalidFile(String),
#[error("Parse error: {0}")]
ParseError(String),
#[error("Syntax error at line {line}, column {column}: {message}")]
SyntaxError {
line: usize,
column: usize,
message: String,
suggestion: Option<String>,
},
#[error("Unsupported nftables syntax at line {line}, column {column}: {feature}")]
UnsupportedSyntax {
line: usize,
column: usize,
feature: String,
suggestion: Option<String>,
},
#[error("Invalid nftables syntax at line {line}, column {column}: {message}")]
InvalidSyntax {
line: usize,
column: usize,
message: String,
suggestion: Option<String>,
},
#[error("IO error: {0}")]
Io(#[from] io::Error),
}
@@ -268,7 +289,7 @@ fn process_single_file_format(
let mut parser = NftablesParser::new(tokens.clone());
parser
.parse()
.map_err(|e| FormatterError::ParseError(e.to_string()))?
.map_err(|e| analyze_parse_error(&source, &tokens, &e.to_string()))?
};
if debug {
@@ -446,6 +467,277 @@ fn process_single_file_lint(
Ok(())
}
/// Intelligent error analysis to categorize parse errors and provide location information
fn analyze_parse_error(source: &str, tokens: &[Token], error: &str) -> FormatterError {
// Convert line/column position from token ranges
let lines: Vec<&str> = source.lines().collect();
// Look for common error patterns and provide specific messages
if error.contains("unexpected token") || error.contains("expected") {
// Try to find the problematic token
if let Some(error_token) = find_error_token(tokens) {
let (line, column) = position_from_range(&error_token.range, source);
// Analyze the specific token to categorize the error
match categorize_syntax_error(&error_token, source, &lines) {
ErrorCategory::UnsupportedSyntax {
feature,
suggestion,
} => FormatterError::UnsupportedSyntax {
line,
column,
feature,
suggestion,
},
ErrorCategory::InvalidSyntax {
message,
suggestion,
} => FormatterError::InvalidSyntax {
line,
column,
message,
suggestion,
},
ErrorCategory::SyntaxError {
message,
suggestion,
} => FormatterError::SyntaxError {
line,
column,
message,
suggestion,
},
}
} else {
// Fallback to generic parse error
FormatterError::ParseError(error.to_string())
}
} else {
FormatterError::ParseError(error.to_string())
}
}
/// Classification buckets produced by `categorize_syntax_error`; each variant
/// maps 1:1 onto a `FormatterError` variant in `analyze_parse_error`.
#[derive(Debug)]
enum ErrorCategory {
    /// A recognizable nftables feature that the formatter does not support yet.
    UnsupportedSyntax {
        feature: String,
        suggestion: Option<String>,
    },
    /// A supported construct written incorrectly (bad CIDR, '..' range, braces).
    InvalidSyntax {
        message: String,
        suggestion: Option<String>,
    },
    /// Anything else: a generic unexpected-token error with an optional hint.
    SyntaxError {
        message: String,
        suggestion: Option<String>,
    },
}
/// Find the first error token in the token stream
fn find_error_token(tokens: &[Token]) -> Option<&Token> {
tokens
.iter()
.find(|token| matches!(token.kind, TokenKind::Error))
}
/// Convert TextRange to line/column position
///
/// Returns a 1-based `(line, column)` pair for the range's start offset.
/// Offsets are byte-based, so the column is a byte index within the line.
/// Falls back to `(1, 1)` when the offset lies past the end of `source`.
fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize) {
    let target: usize = range.start().into();
    let mut line_start = 0;

    for (index, line) in source.lines().enumerate() {
        // End offset of this line, excluding its trailing newline.
        let line_end = line_start + line.len();
        if target <= line_end {
            return (index + 1, target - line_start + 1);
        }
        // Advance past the newline separating this line from the next.
        line_start = line_end + 1;
    }

    (1, 1)
}
/// Categorize syntax errors based on token content and context
///
/// Checks, in order: unsupported features, invalid-but-supported syntax,
/// and finally a generic unexpected-token error with a typo suggestion.
fn categorize_syntax_error(token: &Token, source: &str, lines: &[&str]) -> ErrorCategory {
    let text = &token.text;
    let (line_num, _) = position_from_range(&token.range, source);
    // Full text of the line the token sits on; empty if out of range.
    let context: &str = lines.get(line_num.saturating_sub(1)).copied().unwrap_or("");

    if is_unsupported_feature(text, context) {
        let (feature, suggestion) = classify_unsupported_feature(text, context);
        ErrorCategory::UnsupportedSyntax { feature, suggestion }
    } else if is_invalid_syntax(text, context) {
        let (message, suggestion) = classify_invalid_syntax(text, context);
        ErrorCategory::InvalidSyntax { message, suggestion }
    } else {
        ErrorCategory::SyntaxError {
            message: format!("Unexpected token '{}'", text),
            suggestion: suggest_correction(text, context),
        }
    }
}
/// Check if the token represents an unsupported nftables feature
///
/// Matches whole words only: naive substring matching misfires on common
/// tokens (e.g. "dport" and "sport" both contain the keyword "rt"), which
/// would classify ordinary rules as unsupported.
fn is_unsupported_feature(token_text: &str, line_content: &str) -> bool {
    // List of advanced nftables features that might not be fully supported yet
    const UNSUPPORTED_KEYWORDS: &[&str] = &[
        "quota", "limit", "counter", "meter", "socket", "fib", "rt", "ipsec", "tunnel", "comp",
        "dccp", "sctp", "gre", "esp", "ah", "vlan", "arp", "rateest", "osf", "netdev", "meta",
        "exthdr", "payload", "lookup", "dynset", "flow", "hash", "jhash", "symhash", "crc32",
    ];
    // Split on non-alphanumeric boundaries so keywords only match whole words.
    let mentions_keyword = |text: &str| {
        text.split(|c: char| !c.is_ascii_alphanumeric())
            .any(|word| UNSUPPORTED_KEYWORDS.contains(&word))
    };
    mentions_keyword(token_text) || mentions_keyword(line_content)
}
/// Check if the syntax is invalid (malformed but within supported features)
///
/// Heuristics only: flags the '..' range operator, impossible colon runs,
/// malformed CIDR tokens and unbalanced braces on the offending line.
fn is_invalid_syntax(token_text: &str, line_content: &str) -> bool {
    // ".." is never a valid nftables range operator ("-" is used instead).
    if token_text.contains("..") {
        return true;
    }
    // "::" is legitimate IPv6 shorthand (e.g. "::1"), so only flag colon runs
    // that can never appear in a valid address: ":::" or a second "::".
    if token_text.contains(":::") || token_text.matches("::").count() > 1 {
        return true;
    }
    // Check for malformed addresses or ranges
    if token_text.contains('/') && !is_valid_cidr(token_text) {
        return true;
    }
    // Check for unbalanced braces on the line containing the token
    let open_braces = line_content.matches('{').count();
    let close_braces = line_content.matches('}').count();
    open_braces != close_braces
}
/// Classify unsupported feature and provide suggestion
///
/// Returns a human-readable feature name plus an optional workaround hint.
fn classify_unsupported_feature(token_text: &str, line_content: &str) -> (String, Option<String>) {
    if token_text.contains("quota") {
        return (
            "quota management".to_string(),
            Some("Use explicit rule counting instead".to_string()),
        );
    }
    if token_text.contains("limit") {
        return (
            "rate limiting".to_string(),
            Some("Consider using simpler rule-based rate limiting".to_string()),
        );
    }
    if token_text.contains("counter") {
        return (
            "packet counters".to_string(),
            Some("Use rule-level statistics instead".to_string()),
        );
    }
    // Note: this branch keys off the whole line, not just the token.
    if line_content.contains("meta") {
        return (
            "meta expressions".to_string(),
            Some("Use explicit protocol matching instead".to_string()),
        );
    }
    (format!("advanced feature '{}'", token_text), None)
}
/// Classify invalid syntax and provide suggestion
///
/// Produces a message/suggestion pair for the first matching heuristic:
/// bad CIDR, '..' range operator, unmatched brace, or a generic fallback.
fn classify_invalid_syntax(token_text: &str, line_content: &str) -> (String, Option<String>) {
    if token_text.contains('/') && !is_valid_cidr(token_text) {
        (
            "Invalid CIDR notation".to_string(),
            Some("Use format like '192.168.1.0/24' or '::1/128'".to_string()),
        )
    } else if token_text.contains("..") {
        (
            "Invalid range operator".to_string(),
            Some("Use '-' for ranges like '1000-2000'".to_string()),
        )
    } else if line_content.contains('{') && !line_content.contains('}') {
        (
            "Unmatched opening brace".to_string(),
            Some("Ensure all '{' have matching '}'".to_string()),
        )
    } else {
        (
            format!("Malformed token '{}'", token_text),
            Some("Check nftables syntax documentation".to_string()),
        )
    }
}
/// Suggest correction for common typos
///
/// First checks a fixed typo table, then falls back to a context-based hint
/// for chain declarations missing a known chain type.
fn suggest_correction(token_text: &str, line_content: &str) -> Option<String> {
    // Known misspellings paired with the intended keyword.
    const CORRECTIONS: [(&str, &str); 8] = [
        ("tabel", "table"),
        ("cahin", "chain"),
        ("accpet", "accept"),
        ("rejct", "reject"),
        ("prtocol", "protocol"),
        ("addres", "address"),
        ("pririty", "priority"),
        ("poicy", "policy"),
    ];

    if let Some(&(_, fix)) = CORRECTIONS.iter().find(|&&(typo, _)| token_text.contains(typo)) {
        return Some(format!("Did you mean '{}'?", fix));
    }

    // A "type … hook …" line should name one of the three chain types.
    let declares_chain = line_content.contains("type") && line_content.contains("hook");
    let has_known_type = ["filter", "nat", "route"]
        .iter()
        .any(|t| line_content.contains(t));
    if declares_chain && !has_known_type {
        return Some("Chain type should be 'filter', 'nat', or 'route'".to_string());
    }

    None
}
/// Validate CIDR notation
///
/// Accepts `addr/prefix` where `addr` parses as an IPv4 or IPv6 address and
/// the prefix length fits the family (<= 32 for IPv4, <= 128 for IPv6).
/// The previous version never validated the address part, so junk like
/// "foo/24" passed as valid CIDR.
fn is_valid_cidr(text: &str) -> bool {
    use std::net::IpAddr;

    // Split on the first '/'; no slash means no CIDR.
    let (addr, prefix) = match text.split_once('/') {
        Some(parts) => parts,
        None => return false,
    };
    // Prefix must be a small non-negative integer.
    let prefix_len = match prefix.parse::<u8>() {
        Ok(len) => len,
        Err(_) => return false,
    };
    // Parsing the address itself rejects non-address tokens outright.
    match addr.parse::<IpAddr>() {
        Ok(IpAddr::V4(_)) => prefix_len <= 32,
        Ok(IpAddr::V6(_)) => prefix_len <= 128,
        Err(_) => false,
    }
}
fn main() -> Result<()> {
let args = Args::parse();