Compare commits


5 commits

5 changed files with 587 additions and 386 deletions

Cargo.lock (generated)

@@ -281,6 +281,7 @@ dependencies = [
"cstree",
"glob",
"logos",
"num_enum",
"regex",
"serde",
"serde_json",
@@ -288,6 +289,27 @@ dependencies = [
"thiserror",
]
[[package]]
name = "num_enum"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "parking_lot"
version = "0.12.3"
@@ -311,6 +333,15 @@ dependencies = [
"windows-targets",
]
[[package]]
name = "proc-macro-crate"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
dependencies = [
"toml_edit",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
@@ -487,6 +518,23 @@ dependencies = [
"syn",
]
[[package]]
name = "toml_datetime"
version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"
[[package]]
name = "toml_edit"
version = "0.22.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
dependencies = [
"indexmap",
"toml_datetime",
"winnow",
]
[[package]]
name = "triomphe"
version = "0.1.14"
@@ -573,3 +621,12 @@ name = "windows_x86_64_msvc"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
[[package]]
name = "winnow"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec"
dependencies = [
"memchr",
]


@@ -14,5 +14,6 @@ cstree = "0.12"
text-size = "1.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
regex = "1.11.1"
regex = "1.11"
glob = "0.3"
num_enum = "0.7"
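
The num_enum crate added above supplies the two derives used in the syntax-kind change below. A minimal sketch of what they generate, using a hypothetical `Demo` enum rather than this repo's types:

use num_enum::{IntoPrimitive, TryFromPrimitive};

#[derive(Debug, Clone, Copy, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[repr(u16)]
enum Demo {
    Root = 0,
    Error = 1,
}

fn main() {
    // TryFromPrimitive derives TryFrom<u16>: Ok(..) for known discriminants,
    // Err(..) otherwise, which callers can map to a fallback variant.
    assert_eq!(Demo::try_from(0u16).unwrap(), Demo::Root);
    assert_eq!(Demo::try_from(99u16).unwrap_or(Demo::Error), Demo::Error);
    // IntoPrimitive derives From<Demo> for u16.
    assert_eq!(u16::from(Demo::Root), 0u16);
}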


@@ -4,11 +4,15 @@
use crate::lexer::{Token, TokenKind};
use cstree::{RawSyntaxKind, green::GreenNode, util::NodeOrToken};
use num_enum::{IntoPrimitive, TryFromPrimitive};
use std::fmt;
use thiserror::Error;
/// nftables syntax node types
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// Uses `TryFromPrimitive` for safe conversion from raw values with fallback to `Error`.
#[derive(
Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TryFromPrimitive, IntoPrimitive,
)]
#[repr(u16)]
pub enum SyntaxKind {
// Root and containers
@@ -161,116 +165,128 @@ pub enum SyntaxKind {
impl From<TokenKind> for SyntaxKind {
fn from(kind: TokenKind) -> Self {
use TokenKind::*;
match kind {
TokenKind::Table => SyntaxKind::TableKw,
TokenKind::Chain => SyntaxKind::ChainKw,
TokenKind::Rule => SyntaxKind::RuleKw,
TokenKind::Set => SyntaxKind::SetKw,
TokenKind::Map => SyntaxKind::MapKw,
TokenKind::Element => SyntaxKind::ElementKw,
TokenKind::Include => SyntaxKind::IncludeKw,
TokenKind::Define => SyntaxKind::DefineKw,
TokenKind::Flush => SyntaxKind::FlushKw,
TokenKind::Add => SyntaxKind::AddKw,
TokenKind::Delete => SyntaxKind::DeleteKw,
TokenKind::Insert => SyntaxKind::InsertKw,
TokenKind::Replace => SyntaxKind::ReplaceKw,
// Keywords -> Kw variants
Table => SyntaxKind::TableKw,
Chain => SyntaxKind::ChainKw,
Rule => SyntaxKind::RuleKw,
Set => SyntaxKind::SetKw,
Map => SyntaxKind::MapKw,
Element => SyntaxKind::ElementKw,
Include => SyntaxKind::IncludeKw,
Define => SyntaxKind::DefineKw,
Flush => SyntaxKind::FlushKw,
Add => SyntaxKind::AddKw,
Delete => SyntaxKind::DeleteKw,
Insert => SyntaxKind::InsertKw,
Replace => SyntaxKind::ReplaceKw,
TokenKind::Filter => SyntaxKind::FilterKw,
TokenKind::Nat => SyntaxKind::NatKw,
TokenKind::Route => SyntaxKind::RouteKw,
// Chain types and hooks
Filter => SyntaxKind::FilterKw,
Nat => SyntaxKind::NatKw,
Route => SyntaxKind::RouteKw,
Input => SyntaxKind::InputKw,
Output => SyntaxKind::OutputKw,
Forward => SyntaxKind::ForwardKw,
Prerouting => SyntaxKind::PreroutingKw,
Postrouting => SyntaxKind::PostroutingKw,
TokenKind::Input => SyntaxKind::InputKw,
TokenKind::Output => SyntaxKind::OutputKw,
TokenKind::Forward => SyntaxKind::ForwardKw,
TokenKind::Prerouting => SyntaxKind::PreroutingKw,
TokenKind::Postrouting => SyntaxKind::PostroutingKw,
// Protocols and families
Ip => SyntaxKind::IpKw,
Ip6 => SyntaxKind::Ip6Kw,
Inet => SyntaxKind::InetKw,
Arp => SyntaxKind::ArpKw,
Bridge => SyntaxKind::BridgeKw,
Netdev => SyntaxKind::NetdevKw,
Tcp => SyntaxKind::TcpKw,
Udp => SyntaxKind::UdpKw,
Icmp => SyntaxKind::IcmpKw,
Icmpv6 => SyntaxKind::Icmpv6Kw,
TokenKind::Ip => SyntaxKind::IpKw,
TokenKind::Ip6 => SyntaxKind::Ip6Kw,
TokenKind::Inet => SyntaxKind::InetKw,
TokenKind::Arp => SyntaxKind::ArpKw,
TokenKind::Bridge => SyntaxKind::BridgeKw,
TokenKind::Netdev => SyntaxKind::NetdevKw,
TokenKind::Tcp => SyntaxKind::TcpKw,
TokenKind::Udp => SyntaxKind::UdpKw,
TokenKind::Icmp => SyntaxKind::IcmpKw,
TokenKind::Icmpv6 => SyntaxKind::Icmpv6Kw,
// Match keywords
Sport => SyntaxKind::SportKw,
Dport => SyntaxKind::DportKw,
Saddr => SyntaxKind::SaddrKw,
Daddr => SyntaxKind::DaddrKw,
Protocol => SyntaxKind::ProtocolKw,
Nexthdr => SyntaxKind::NexthdrKw,
Type => SyntaxKind::TypeKw,
Hook => SyntaxKind::HookKw,
Priority => SyntaxKind::PriorityKw,
Policy => SyntaxKind::PolicyKw,
Iifname => SyntaxKind::IifnameKw,
Oifname => SyntaxKind::OifnameKw,
Ct => SyntaxKind::CtKw,
State => SyntaxKind::StateKw,
TokenKind::Sport => SyntaxKind::SportKw,
TokenKind::Dport => SyntaxKind::DportKw,
TokenKind::Saddr => SyntaxKind::SaddrKw,
TokenKind::Daddr => SyntaxKind::DaddrKw,
TokenKind::Protocol => SyntaxKind::ProtocolKw,
TokenKind::Nexthdr => SyntaxKind::NexthdrKw,
TokenKind::Type => SyntaxKind::TypeKw,
TokenKind::Hook => SyntaxKind::HookKw,
TokenKind::Priority => SyntaxKind::PriorityKw,
TokenKind::Policy => SyntaxKind::PolicyKw,
TokenKind::Iifname => SyntaxKind::IifnameKw,
TokenKind::Oifname => SyntaxKind::OifnameKw,
TokenKind::Ct => SyntaxKind::CtKw,
TokenKind::State => SyntaxKind::StateKw,
// Actions
Accept => SyntaxKind::AcceptKw,
Drop => SyntaxKind::DropKw,
Reject => SyntaxKind::RejectKw,
Return => SyntaxKind::ReturnKw,
Jump => SyntaxKind::JumpKw,
Goto => SyntaxKind::GotoKw,
Continue => SyntaxKind::ContinueKw,
Log => SyntaxKind::LogKw,
Comment => SyntaxKind::CommentKw,
TokenKind::Accept => SyntaxKind::AcceptKw,
TokenKind::Drop => SyntaxKind::DropKw,
TokenKind::Reject => SyntaxKind::RejectKw,
TokenKind::Return => SyntaxKind::ReturnKw,
TokenKind::Jump => SyntaxKind::JumpKw,
TokenKind::Goto => SyntaxKind::GotoKw,
TokenKind::Continue => SyntaxKind::ContinueKw,
TokenKind::Log => SyntaxKind::LogKw,
TokenKind::Comment => SyntaxKind::CommentKw,
// States
Established => SyntaxKind::EstablishedKw,
Related => SyntaxKind::RelatedKw,
New => SyntaxKind::NewKw,
Invalid => SyntaxKind::InvalidKw,
TokenKind::Established => SyntaxKind::EstablishedKw,
TokenKind::Related => SyntaxKind::RelatedKw,
TokenKind::New => SyntaxKind::NewKw,
TokenKind::Invalid => SyntaxKind::InvalidKw,
// Additional protocol keywords
Vmap => SyntaxKind::VmapKw,
NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
EchoRequest => SyntaxKind::EchoRequestKw,
DestUnreachable => SyntaxKind::DestUnreachableKw,
RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
TimeExceeded => SyntaxKind::TimeExceededKw,
ParameterProblem => SyntaxKind::ParameterProblemKw,
PacketTooBig => SyntaxKind::PacketTooBigKw,
TokenKind::Vmap => SyntaxKind::VmapKw,
TokenKind::NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
TokenKind::NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
TokenKind::NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
TokenKind::EchoRequest => SyntaxKind::EchoRequestKw,
TokenKind::DestUnreachable => SyntaxKind::DestUnreachableKw,
TokenKind::RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
TokenKind::TimeExceeded => SyntaxKind::TimeExceededKw,
TokenKind::ParameterProblem => SyntaxKind::ParameterProblemKw,
TokenKind::PacketTooBig => SyntaxKind::PacketTooBigKw,
// Operators - direct mapping
Eq => SyntaxKind::EqOp,
Ne => SyntaxKind::NeOp,
Le => SyntaxKind::LeOp,
Ge => SyntaxKind::GeOp,
Lt => SyntaxKind::LtOp,
Gt => SyntaxKind::GtOp,
TokenKind::Eq => SyntaxKind::EqOp,
TokenKind::Ne => SyntaxKind::NeOp,
TokenKind::Le => SyntaxKind::LeOp,
TokenKind::Ge => SyntaxKind::GeOp,
TokenKind::Lt => SyntaxKind::LtOp,
TokenKind::Gt => SyntaxKind::GtOp,
// Punctuation - direct mapping
LeftBrace => SyntaxKind::LeftBrace,
RightBrace => SyntaxKind::RightBrace,
LeftParen => SyntaxKind::LeftParen,
RightParen => SyntaxKind::RightParen,
LeftBracket => SyntaxKind::LeftBracket,
RightBracket => SyntaxKind::RightBracket,
Comma => SyntaxKind::Comma,
Semicolon => SyntaxKind::Semicolon,
Colon => SyntaxKind::Colon,
Assign => SyntaxKind::Assign,
Dash => SyntaxKind::Dash,
Slash => SyntaxKind::Slash,
Dot => SyntaxKind::Dot,
TokenKind::LeftBrace => SyntaxKind::LeftBrace,
TokenKind::RightBrace => SyntaxKind::RightBrace,
TokenKind::LeftParen => SyntaxKind::LeftParen,
TokenKind::RightParen => SyntaxKind::RightParen,
TokenKind::LeftBracket => SyntaxKind::LeftBracket,
TokenKind::RightBracket => SyntaxKind::RightBracket,
TokenKind::Comma => SyntaxKind::Comma,
TokenKind::Semicolon => SyntaxKind::Semicolon,
TokenKind::Colon => SyntaxKind::Colon,
TokenKind::Assign => SyntaxKind::Assign,
TokenKind::Dash => SyntaxKind::Dash,
TokenKind::Slash => SyntaxKind::Slash,
TokenKind::Dot => SyntaxKind::Dot,
// Literals - map data-carrying variants to their types
StringLiteral(_) => SyntaxKind::StringLiteral,
NumberLiteral(_) => SyntaxKind::NumberLiteral,
IpAddress(_) => SyntaxKind::IpAddress,
Ipv6Address(_) => SyntaxKind::Ipv6Address,
MacAddress(_) => SyntaxKind::MacAddress,
Identifier(_) => SyntaxKind::Identifier,
TokenKind::StringLiteral(_) => SyntaxKind::StringLiteral,
TokenKind::NumberLiteral(_) => SyntaxKind::NumberLiteral,
TokenKind::IpAddress(_) => SyntaxKind::IpAddress,
TokenKind::Ipv6Address(_) => SyntaxKind::Ipv6Address,
TokenKind::MacAddress(_) => SyntaxKind::MacAddress,
TokenKind::Identifier(_) => SyntaxKind::Identifier,
// Special tokens
Newline => SyntaxKind::Newline,
CommentLine(_) => SyntaxKind::Comment,
Shebang(_) => SyntaxKind::Shebang,
TokenKind::Newline => SyntaxKind::Newline,
TokenKind::CommentLine(_) => SyntaxKind::Comment,
TokenKind::Shebang(_) => SyntaxKind::Shebang,
TokenKind::Error => SyntaxKind::Error,
// Error fallback
Error => SyntaxKind::Error,
}
}
}
@@ -324,13 +340,7 @@ impl SyntaxKind {
}
pub fn from_raw(raw: RawSyntaxKind) -> Self {
match raw.0 {
0 => SyntaxKind::Root,
1 => SyntaxKind::Table,
// ... other variants ...
116 => SyntaxKind::PacketTooBigKw,
_ => SyntaxKind::Error, // Fallback to Error for invalid values
}
Self::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)
}
}
@@ -1237,7 +1247,7 @@ mod tests {
let mut lexer = NftablesLexer::new(source);
let tokens = lexer.tokenize().expect("Tokenization should succeed");
// CST is now implemented - test that it works
// Test CST construction with basic table syntax
let green_tree = CstBuilder::build_tree(&tokens);
// Verify the tree was created successfully
@@ -1254,4 +1264,88 @@
let cst_result = CstBuilder::parse_to_cst(&tokens);
assert!(cst_result.is_ok());
}
#[test]
fn test_num_enum_improvements() {
// Test that from_raw uses num_enum for conversion
// Invalid values fall back to Error variant
// Test valid conversions
assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(0)), SyntaxKind::Root);
assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1)), SyntaxKind::Table);
assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(25)), SyntaxKind::TableKw);
assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(106)), SyntaxKind::Error);
assert_eq!(
SyntaxKind::from_raw(RawSyntaxKind(116)),
SyntaxKind::PacketTooBigKw
);
// Test invalid values automatically fall back to Error
assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(999)), SyntaxKind::Error);
assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1000)), SyntaxKind::Error);
// Test bidirectional conversion
for variant in [
SyntaxKind::Root,
SyntaxKind::Table,
SyntaxKind::TableKw,
SyntaxKind::Error,
SyntaxKind::PacketTooBigKw,
] {
let raw = variant.to_raw();
let converted_back = SyntaxKind::from_raw(raw);
assert_eq!(variant, converted_back);
}
}
#[test]
fn test_token_kind_conversion_improvements() {
// Test that From<TokenKind> conversion is complete and correct
use crate::lexer::TokenKind;
// Test keyword mappings
assert_eq!(SyntaxKind::from(TokenKind::Table), SyntaxKind::TableKw);
assert_eq!(SyntaxKind::from(TokenKind::Chain), SyntaxKind::ChainKw);
assert_eq!(SyntaxKind::from(TokenKind::Accept), SyntaxKind::AcceptKw);
// Test operators
assert_eq!(SyntaxKind::from(TokenKind::Eq), SyntaxKind::EqOp);
assert_eq!(SyntaxKind::from(TokenKind::Lt), SyntaxKind::LtOp);
// Test punctuation
assert_eq!(
SyntaxKind::from(TokenKind::LeftBrace),
SyntaxKind::LeftBrace
);
assert_eq!(
SyntaxKind::from(TokenKind::Semicolon),
SyntaxKind::Semicolon
);
// Test literals (with data)
assert_eq!(
SyntaxKind::from(TokenKind::StringLiteral("test".to_string())),
SyntaxKind::StringLiteral
);
assert_eq!(
SyntaxKind::from(TokenKind::NumberLiteral(42)),
SyntaxKind::NumberLiteral
);
assert_eq!(
SyntaxKind::from(TokenKind::IpAddress("192.168.1.1".to_string())),
SyntaxKind::IpAddress
);
assert_eq!(
SyntaxKind::from(TokenKind::Identifier("test".to_string())),
SyntaxKind::Identifier
);
// Test special tokens
assert_eq!(SyntaxKind::from(TokenKind::Newline), SyntaxKind::Newline);
assert_eq!(
SyntaxKind::from(TokenKind::CommentLine("# comment".to_string())),
SyntaxKind::Comment
);
assert_eq!(SyntaxKind::from(TokenKind::Error), SyntaxKind::Error);
}
}


@@ -607,6 +607,11 @@ impl AnalyzerModule for StyleAnalyzer {
fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
let mut diagnostics = Vec::new();
// Only perform style analysis if enabled
if !config.enable_style_warnings {
return diagnostics;
}
// Check for missing shebang
if !source.starts_with("#!") {
let range = Range::new(Position::new(0, 0), Position::new(0, 0));
@@ -802,14 +807,29 @@ impl StyleAnalyzer {
pub struct SemanticAnalyzer;
impl AnalyzerModule for SemanticAnalyzer {
fn analyze(&self, source: &str, _config: &DiagnosticConfig) -> Vec<Diagnostic> {
fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
let mut diagnostics = Vec::new();
// Parse and validate nftables-specific constructs
// Always run semantic validation (syntax/semantic errors)
diagnostics.extend(self.validate_table_declarations(source));
diagnostics.extend(self.validate_chain_declarations(source));
diagnostics.extend(self.validate_chain_declarations_semantic(source));
diagnostics.extend(self.validate_cidr_notation(source));
diagnostics.extend(self.check_for_redundant_rules(source));
// Best practices checks (only if enabled)
if config.enable_best_practices {
diagnostics.extend(self.validate_chain_best_practices(source));
diagnostics.extend(self.check_for_redundant_rules(source));
}
// Performance hints (only if enabled)
if config.enable_performance_hints {
diagnostics.extend(self.check_performance_hints(source));
}
// Security warnings (only if enabled)
if config.enable_security_warnings {
diagnostics.extend(self.check_security_warnings(source));
}
diagnostics
}
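
A self-contained sketch of the gating pattern above (names are illustrative, not this crate's real API; the actual `DiagnosticConfig` fields are the four booleans referenced in the diff):

#[derive(Default)]
struct Config {
    enable_best_practices: bool,
    enable_performance_hints: bool,
}

fn analyze(source: &str, config: &Config) -> Vec<String> {
    let mut out = Vec::new();
    // Semantic validation always runs, regardless of configuration.
    out.push(format!("semantic: checked {} bytes", source.len()));
    if config.enable_best_practices {
        out.push("best-practice findings".to_string());
    }
    if config.enable_performance_hints {
        out.push("performance hints".to_string());
    }
    out
}

fn main() {
    // Best-practice checks are skipped; hints still run.
    let config = Config { enable_performance_hints: true, ..Default::default() };
    for d in analyze("table inet filter {}", &config) {
        println!("{d}");
    }
}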
@@ -880,7 +900,7 @@ impl SemanticAnalyzer {
diagnostics
}
fn validate_chain_declarations(&self, source: &str) -> Vec<Diagnostic> {
fn validate_chain_declarations_semantic(&self, source: &str) -> Vec<Diagnostic> {
let mut diagnostics = Vec::new();
for (line_idx, line) in source.lines().enumerate() {
@@ -888,7 +908,7 @@ impl SemanticAnalyzer {
let trimmed = line.trim();
if trimmed.starts_with("type ") && trimmed.contains("hook") {
// Validate chain type and hook
// Validate chain type and hook (semantic validation)
if let Some(hook_pos) = trimmed.find("hook") {
let hook_part = &trimmed[hook_pos..];
let hook_words: Vec<&str> = hook_part.split_whitespace().collect();
@@ -916,22 +936,109 @@
}
}
}
}
}
diagnostics
}
fn validate_chain_best_practices(&self, source: &str) -> Vec<Diagnostic> {
let mut diagnostics = Vec::new();
for (line_idx, line) in source.lines().enumerate() {
let line_num = line_idx as u32;
let trimmed = line.trim();
// Check for missing policy in filter chains (best practice)
if trimmed.contains("type filter") && !trimmed.contains("policy") {
let range = Range::new(
Position::new(line_num, 0),
Position::new(line_num, line.len() as u32),
);
let diagnostic = Diagnostic::new(
range,
DiagnosticSeverity::Warning,
DiagnosticCode::ChainWithoutPolicy,
"Filter chain should have an explicit policy".to_string(),
);
diagnostics.push(diagnostic);
}
}
diagnostics
}
fn check_performance_hints(&self, source: &str) -> Vec<Diagnostic> {
let mut diagnostics = Vec::new();
for (line_idx, line) in source.lines().enumerate() {
let line_num = line_idx as u32;
let trimmed = line.trim();
// Check for large sets without timeout (performance hint)
if trimmed.contains("set ") && trimmed.contains("{") && !trimmed.contains("timeout") {
// Simple heuristic: if set definition is long, suggest timeout
if trimmed.len() > 100 {
let range = Range::new(
Position::new(line_num, 0),
Position::new(line_num, line.len() as u32),
);
let diagnostic = Diagnostic::new(
range,
DiagnosticSeverity::Hint,
DiagnosticCode::LargeSetWithoutTimeout,
"Consider adding a timeout to large sets for better performance"
.to_string(),
);
diagnostics.push(diagnostic);
}
}
// Check for missing counters (performance hint)
if (trimmed.contains(" accept") || trimmed.contains(" drop"))
&& !trimmed.contains("counter")
{
let range = Range::new(
Position::new(line_num, 0),
Position::new(line_num, line.len() as u32),
);
let diagnostic = Diagnostic::new(
range,
DiagnosticSeverity::Hint,
DiagnosticCode::MissingCounters,
"Consider adding counters to rules for monitoring and debugging".to_string(),
);
diagnostics.push(diagnostic);
}
}
diagnostics
}
fn check_security_warnings(&self, source: &str) -> Vec<Diagnostic> {
let mut diagnostics = Vec::new();
for (line_idx, line) in source.lines().enumerate() {
let line_num = line_idx as u32;
let trimmed = line.trim();
// Check for overly permissive rules (security warning)
if trimmed.contains(" accept")
&& (trimmed.contains("0.0.0.0/0") || trimmed.contains("::/0"))
{
let range = Range::new(
Position::new(line_num, 0),
Position::new(line_num, line.len() as u32),
);
let diagnostic = Diagnostic::new(
range,
DiagnosticSeverity::Warning,
DiagnosticCode::SecurityRisk,
"Rule accepts traffic from anywhere - consider restricting source addresses"
.to_string(),
);
diagnostics.push(diagnostic);
}
}
diagnostics

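For reference, inputs that would trip the two rule-level checks added here, given the substring heuristics above (illustrative lines, not from a real ruleset):

fn main() {
    // Performance hint: accept/drop without a counter.
    let no_counter = "tcp dport 22 accept";
    assert!(no_counter.contains(" accept") && !no_counter.contains("counter"));

    // Security warning: accepting traffic from any source address.
    let wide_open = "ip saddr 0.0.0.0/0 accept";
    assert!(wide_open.contains(" accept") && wide_open.contains("0.0.0.0/0"));

    println!("both heuristics would fire");
}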

@@ -34,24 +34,18 @@ enum FormatterError {
message: String,
suggestion: Option<String>,
},
#[error("Unsupported nftables syntax at line {line}, column {column}: {feature}")]
UnsupportedSyntax {
line: usize,
column: usize,
feature: String,
suggestion: Option<String>,
},
#[error("Invalid nftables syntax at line {line}, column {column}: {message}")]
InvalidSyntax {
line: usize,
column: usize,
message: String,
suggestion: Option<String>,
},
#[error("IO error: {0}")]
Io(#[from] io::Error),
}
#[derive(Error, Debug)]
enum LintError {
#[error("Lint errors found in {file_count} file(s)")]
DiagnosticErrors { file_count: usize },
#[error("File discovery error: {0}")]
FileDiscovery(#[from] anyhow::Error),
}
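
The `DiagnosticErrors` variant is returned further down in `process_lint_command` instead of calling `process::exit(1)`; `thiserror` derives `std::error::Error`, so anyhow's blanket `From` impl lifts it into the `anyhow::Result` that `main` returns. A minimal sketch of that flow, with a hypothetical `DemoLintError`:

use thiserror::Error;

#[derive(Error, Debug)]
enum DemoLintError {
    #[error("Lint errors found in {file_count} file(s)")]
    DiagnosticErrors { file_count: usize },
}

fn run() -> anyhow::Result<()> {
    // Returning Err lets callers (and tests) observe the failure;
    // main still exits with a non-zero status code.
    Err(DemoLintError::DiagnosticErrors { file_count: 2 }.into())
}

fn main() -> anyhow::Result<()> {
    run()
}
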
#[derive(Parser, Debug, Clone)]
#[command(
name = "nff",
@@ -107,19 +101,19 @@ enum Commands {
json: bool,
/// Include style warnings in diagnostics
#[arg(long, default_value = "true")]
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
style_warnings: bool,
/// Include best practice recommendations in diagnostics
#[arg(long, default_value = "true")]
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
best_practices: bool,
/// Include performance hints in diagnostics
#[arg(long, default_value = "true")]
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
performance_hints: bool,
/// Include security warnings in diagnostics
#[arg(long, default_value = "true")]
#[arg(long, action = clap::ArgAction::Set, default_value = "true")]
security_warnings: bool,
/// Diagnostic modules to run (comma-separated: lexical,syntax,style,semantic)
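
With `clap::ArgAction::Set`, each of these flags now takes an explicit value instead of acting as a bare switch, so a default-on category can be disabled from the command line. A self-contained sketch of the pattern with a hypothetical `Opts` struct:

use clap::Parser;

#[derive(Parser, Debug)]
struct Opts {
    /// ArgAction::Set makes the flag value-taking: `--style-warnings false`.
    #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
    style_warnings: bool,
}

fn main() {
    // No flag given            -> style_warnings == true (default_value)
    // `--style-warnings false` -> style_warnings == false
    let opts = Opts::parse();
    println!("style warnings: {}", opts.style_warnings);
}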
@@ -289,7 +283,7 @@ fn process_single_file_format(
let mut parser = NftablesParser::new(tokens.clone());
parser
.parse()
.map_err(|e| analyze_parse_error(&source, &tokens, &e.to_string()))?
.map_err(|e| convert_parse_error_to_formatter_error(&e, &source, &tokens))?
};
if debug {
@@ -351,7 +345,7 @@ fn process_lint_command(
};
let is_multiple_files = files.len() > 1;
let mut has_errors = false;
let mut error_file_count = 0;
for file_path in files {
if let Err(e) = process_single_file_lint(
@@ -366,16 +360,18 @@
is_multiple_files,
) {
eprintln!("Error processing {}: {}", file_path, e);
has_errors = true;
error_file_count += 1;
if !is_multiple_files {
return Err(e);
}
}
}
// Exit with non-zero code if any file had errors
if has_errors {
std::process::exit(1);
if error_file_count > 0 {
return Err(LintError::DiagnosticErrors {
file_count: error_file_count,
}
.into());
}
Ok(())
@@ -475,77 +471,203 @@ fn process_single_file_lint(
Ok(())
}
/// Intelligent error analysis to categorize parse errors and provide location information
fn analyze_parse_error(source: &str, tokens: &[Token], error: &str) -> FormatterError {
// Convert line/column position from token ranges
let lines: Vec<&str> = source.lines().collect();
/// Convert parser errors to formatter errors with proper location information
fn convert_parse_error_to_formatter_error(
error: &crate::parser::ParseError,
source: &str,
tokens: &[Token],
) -> FormatterError {
use crate::parser::ParseError;
// Look for common error patterns and provide specific messages
if error.contains("unexpected token") || error.contains("expected") {
// Try to find the problematic token
if let Some(error_token) = find_error_token(tokens) {
let (line, column) = position_from_range(&error_token.range, source);
// Analyze the specific token to categorize the error
match categorize_syntax_error(&error_token, source, &lines) {
ErrorCategory::UnsupportedSyntax {
feature,
suggestion,
} => FormatterError::UnsupportedSyntax {
line,
column,
feature,
suggestion,
},
ErrorCategory::InvalidSyntax {
message,
suggestion,
} => FormatterError::InvalidSyntax {
line,
column,
message,
suggestion,
},
ErrorCategory::SyntaxError {
message,
suggestion,
} => FormatterError::SyntaxError {
line,
column,
message,
suggestion,
},
match error {
ParseError::UnexpectedToken {
line,
column,
expected,
found,
} => FormatterError::SyntaxError {
line: *line,
column: *column,
message: format!("Expected {}, found '{}'", expected, found),
suggestion: None,
},
ParseError::MissingToken { expected } => {
// Try to find current position from last token
let (line, column) = if let Some(last_token) = tokens.last() {
position_from_range(&last_token.range, source)
} else {
(1, 1)
};
FormatterError::SyntaxError {
line,
column,
message: format!("Missing token: expected {}", expected),
suggestion: None,
}
}
ParseError::InvalidExpression { message } => {
// Try to find the current token position
let (line, column) = find_current_parse_position(tokens, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid expression: {}", message),
suggestion: None,
}
}
ParseError::InvalidStatement { message } => {
let (line, column) = find_current_parse_position(tokens, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid statement: {}", message),
suggestion: None,
}
}
ParseError::SemanticError { message } => {
let (line, column) = find_current_parse_position(tokens, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Semantic error: {}", message),
suggestion: None,
}
}
ParseError::LexError(lex_error) => {
// Convert lexical errors to formatter errors with location
convert_lex_error_to_formatter_error(lex_error, source)
}
ParseError::AnyhowError(anyhow_error) => {
// For anyhow errors, try to extract location from error message and context
let error_msg = anyhow_error.to_string();
let (line, column) = find_error_location_from_context(&error_msg, tokens, source);
let suggestion = generate_suggestion_for_error(&error_msg);
FormatterError::SyntaxError {
line,
column,
message: error_msg,
suggestion,
}
} else {
// Fallback to generic parse error
FormatterError::ParseError(error.to_string())
}
} else {
FormatterError::ParseError(error.to_string())
}
}
#[derive(Debug)]
enum ErrorCategory {
UnsupportedSyntax {
feature: String,
suggestion: Option<String>,
},
InvalidSyntax {
message: String,
suggestion: Option<String>,
},
SyntaxError {
message: String,
suggestion: Option<String>,
},
/// Find the current parsing position from tokens
fn find_current_parse_position(tokens: &[Token], source: &str) -> (usize, usize) {
// Look for the last non-whitespace, non-comment token
for token in tokens.iter().rev() {
match token.kind {
TokenKind::Newline | TokenKind::CommentLine(_) => continue,
_ => return position_from_range(&token.range, source),
}
}
(1, 1) // fallback
}
/// Find the first error token in the token stream
fn find_error_token(tokens: &[Token]) -> Option<&Token> {
tokens
.iter()
.find(|token| matches!(token.kind, TokenKind::Error))
/// Convert lexical errors to formatter errors
fn convert_lex_error_to_formatter_error(
lex_error: &crate::lexer::LexError,
source: &str,
) -> FormatterError {
use crate::lexer::LexError;
match lex_error {
LexError::InvalidToken { position, text } => {
let (line, column) = offset_to_line_column(*position, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid token: '{}'", text),
suggestion: None,
}
}
LexError::UnterminatedString { position } => {
let (line, column) = offset_to_line_column(*position, source);
FormatterError::SyntaxError {
line,
column,
message: "Unterminated string literal".to_string(),
suggestion: Some("Add closing quote".to_string()),
}
}
LexError::InvalidNumber { position, text } => {
let (line, column) = offset_to_line_column(*position, source);
FormatterError::SyntaxError {
line,
column,
message: format!("Invalid number: '{}'", text),
suggestion: Some("Check number format".to_string()),
}
}
}
}
/// Convert byte offset to line/column position
fn offset_to_line_column(offset: usize, source: &str) -> (usize, usize) {
let mut line = 1;
let mut column = 1;
for (i, ch) in source.char_indices() {
if i >= offset {
break;
}
if ch == '\n' {
line += 1;
column = 1;
} else {
column += 1;
}
}
(line, column)
}
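
A quick illustrative check of the mapping (byte offsets in, 1-based line/column out; assumes the function above is in scope):

#[cfg(test)]
mod offset_to_line_column_tests {
    use super::offset_to_line_column;

    #[test]
    fn maps_byte_offsets_to_one_based_positions() {
        let src = "table inet filter {\n  chain input {\n";
        assert_eq!(offset_to_line_column(0, src), (1, 1));
        // Byte 20 is the first byte after the first '\n'.
        assert_eq!(offset_to_line_column(20, src), (2, 1));
    }
}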
/// Find error location from context clues in the error message
fn find_error_location_from_context(
error_msg: &str,
tokens: &[Token],
source: &str,
) -> (usize, usize) {
// Look for context clues in the error message
if error_msg.contains("Expected string or identifier, got:") {
// Find the problematic token mentioned in the error
if let Some(bad_token_text) = extract_token_from_error_message(error_msg) {
// Find this token in the token stream
for token in tokens {
if token.text == bad_token_text {
return position_from_range(&token.range, source);
}
}
}
}
// Fallback to finding last meaningful token
find_current_parse_position(tokens, source)
}
/// Extract the problematic token from error message
fn extract_token_from_error_message(error_msg: &str) -> Option<String> {
// Parse messages like "Expected string or identifier, got: {"
if let Some(got_part) = error_msg.split("got: ").nth(1) {
Some(got_part.trim().to_string())
} else {
None
}
}
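
A quick check of the extraction (sketch; assumes the helper above is in scope):

#[cfg(test)]
mod extract_token_tests {
    use super::extract_token_from_error_message;

    #[test]
    fn pulls_the_offending_token_out_of_the_message() {
        let msg = "Expected string or identifier, got: {";
        assert_eq!(extract_token_from_error_message(msg), Some("{".to_string()));
    }
}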
/// Generate helpful suggestions based on error message
fn generate_suggestion_for_error(error_msg: &str) -> Option<String> {
if error_msg.contains("Expected string or identifier") {
Some(
"Check if you're missing quotes around a string value or have an unexpected character"
.to_string(),
)
} else if error_msg.contains("Expected") && error_msg.contains("got:") {
Some("Check syntax and ensure proper nftables structure".to_string())
} else {
None
}
}
/// Convert TextRange to line/column position
@@ -566,186 +688,6 @@ fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize)
(1, 1) // fallback
}
/// Categorize syntax errors based on token content and context
fn categorize_syntax_error(token: &Token, source: &str, lines: &[&str]) -> ErrorCategory {
let token_text = &token.text;
let (line_num, _) = position_from_range(&token.range, source);
let line_content = lines.get(line_num.saturating_sub(1)).unwrap_or(&"");
// Check for unsupported nftables features
if is_unsupported_feature(token_text, line_content) {
let (feature, suggestion) = classify_unsupported_feature(token_text, line_content);
return ErrorCategory::UnsupportedSyntax {
feature,
suggestion,
};
}
// Check for invalid but supported syntax
if is_invalid_syntax(token_text, line_content) {
let (message, suggestion) = classify_invalid_syntax(token_text, line_content);
return ErrorCategory::InvalidSyntax {
message,
suggestion,
};
}
// Default to syntax error
ErrorCategory::SyntaxError {
message: format!("Unexpected token '{}'", token_text),
suggestion: suggest_correction(token_text, line_content),
}
}
/// Check if the token represents an unsupported nftables feature
fn is_unsupported_feature(token_text: &str, line_content: &str) -> bool {
// List of advanced nftables features that might not be fully supported yet
let unsupported_keywords = [
"quota", "limit", "counter", "meter", "socket", "fib", "rt", "ipsec", "tunnel", "comp",
"dccp", "sctp", "gre", "esp", "ah", "vlan", "arp", "rateest", "osf", "netdev", "meta",
"exthdr", "payload", "lookup", "dynset", "flow", "hash", "jhash", "symhash", "crc32",
];
unsupported_keywords
.iter()
.any(|&keyword| token_text.contains(keyword) || line_content.contains(keyword))
}
/// Check if the syntax is invalid (malformed but within supported features)
fn is_invalid_syntax(token_text: &str, line_content: &str) -> bool {
// Check for common syntax mistakes
if token_text.contains("..") || token_text.contains("::") {
return true; // Double operators usually indicate mistakes
}
// Check for malformed addresses or ranges
if token_text.contains("/") && !is_valid_cidr(token_text) {
return true;
}
// Check for malformed brackets/braces
let open_braces = line_content.matches('{').count();
let close_braces = line_content.matches('}').count();
if open_braces != close_braces {
return true;
}
false
}
/// Classify unsupported feature and provide suggestion
fn classify_unsupported_feature(token_text: &str, line_content: &str) -> (String, Option<String>) {
let feature = if token_text.contains("quota") {
(
"quota management".to_string(),
Some("Use explicit rule counting instead".to_string()),
)
} else if token_text.contains("limit") {
(
"rate limiting".to_string(),
Some("Consider using simpler rule-based rate limiting".to_string()),
)
} else if token_text.contains("counter") {
(
"packet counters".to_string(),
Some("Use rule-level statistics instead".to_string()),
)
} else if line_content.contains("meta") {
(
"meta expressions".to_string(),
Some("Use explicit protocol matching instead".to_string()),
)
} else {
(format!("advanced feature '{}'", token_text), None)
};
feature
}
/// Classify invalid syntax and provide suggestion
fn classify_invalid_syntax(token_text: &str, line_content: &str) -> (String, Option<String>) {
if token_text.contains("/") && !is_valid_cidr(token_text) {
return (
"Invalid CIDR notation".to_string(),
Some("Use format like '192.168.1.0/24' or '::1/128'".to_string()),
);
}
if token_text.contains("..") {
return (
"Invalid range operator".to_string(),
Some("Use '-' for ranges like '1000-2000'".to_string()),
);
}
if line_content.contains('{') && !line_content.contains('}') {
return (
"Unmatched opening brace".to_string(),
Some("Ensure all '{' have matching '}'".to_string()),
);
}
(
format!("Malformed token '{}'", token_text),
Some("Check nftables syntax documentation".to_string()),
)
}
/// Suggest correction for common typos
fn suggest_correction(token_text: &str, line_content: &str) -> Option<String> {
// Common typos and their corrections
let corrections = [
("tabel", "table"),
("cahin", "chain"),
("accpet", "accept"),
("rejct", "reject"),
("prtocol", "protocol"),
("addres", "address"),
("pririty", "priority"),
("poicy", "policy"),
];
for (typo, correction) in &corrections {
if token_text.contains(typo) {
return Some(format!("Did you mean '{}'?", correction));
}
}
// Context-based suggestions
if line_content.contains("type") && line_content.contains("hook") {
if !line_content.contains("filter")
&& !line_content.contains("nat")
&& !line_content.contains("route")
{
return Some("Chain type should be 'filter', 'nat', or 'route'".to_string());
}
}
None
}
/// Validate CIDR notation
fn is_valid_cidr(text: &str) -> bool {
if let Some(slash_pos) = text.find('/') {
let (addr, prefix) = text.split_at(slash_pos);
let prefix = &prefix[1..]; // Remove the '/'
// Check if prefix is a valid number
if let Ok(prefix_len) = prefix.parse::<u8>() {
// Basic validation - IPv4 should be <= 32, IPv6 <= 128
if addr.contains(':') {
prefix_len <= 128 // IPv6
} else {
prefix_len <= 32 // IPv4
}
} else {
false
}
} else {
false
}
}
fn main() -> Result<()> {
let args = Args::parse();