Compare commits


No commits in common. "b9d8cb6d5d5469db5a221b98e713232010e87cc4" and "92735f3eeef9061b03504598f23d6c1347a8a4f0" have entirely different histories.

5 changed files with 385 additions and 586 deletions

Cargo.lock (generated)

@@ -281,7 +281,6 @@ dependencies = [
  "cstree",
  "glob",
  "logos",
- "num_enum",
  "regex",
  "serde",
  "serde_json",
@@ -289,27 +288,6 @@ dependencies = [
  "thiserror",
 ]
 
-[[package]]
-name = "num_enum"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
-dependencies = [
- "num_enum_derive",
-]
-
-[[package]]
-name = "num_enum_derive"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
-dependencies = [
- "proc-macro-crate",
- "proc-macro2",
- "quote",
- "syn",
-]
-
 [[package]]
 name = "parking_lot"
 version = "0.12.3"
@@ -333,15 +311,6 @@ dependencies = [
  "windows-targets",
 ]
 
-[[package]]
-name = "proc-macro-crate"
-version = "3.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
-dependencies = [
- "toml_edit",
-]
-
 [[package]]
 name = "proc-macro2"
 version = "1.0.95"
@@ -518,23 +487,6 @@ dependencies = [
  "syn",
 ]
 
-[[package]]
-name = "toml_datetime"
-version = "0.6.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"
-
-[[package]]
-name = "toml_edit"
-version = "0.22.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
-dependencies = [
- "indexmap",
- "toml_datetime",
- "winnow",
-]
-
 [[package]]
 name = "triomphe"
 version = "0.1.14"
@@ -621,12 +573,3 @@ name = "windows_x86_64_msvc"
 version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
-
-[[package]]
-name = "winnow"
-version = "0.7.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec"
-dependencies = [
- "memchr",
-]

Cargo.toml

@ -14,6 +14,5 @@ cstree = "0.12"
text-size = "1.1" text-size = "1.1"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
regex = "1.11" regex = "1.11.1"
glob = "0.3" glob = "0.3"
num_enum = "0.7"

[syntax kind / CST module (file name not shown)]

@@ -4,15 +4,11 @@
 use crate::lexer::{Token, TokenKind};
 use cstree::{RawSyntaxKind, green::GreenNode, util::NodeOrToken};
-use num_enum::{IntoPrimitive, TryFromPrimitive};
 use std::fmt;
 use thiserror::Error;
 
 /// nftables syntax node types
-/// Uses `TryFromPrimitive` for safe conversion from raw values with fallback to `Error`.
-#[derive(
-    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TryFromPrimitive, IntoPrimitive,
-)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[repr(u16)]
 pub enum SyntaxKind {
     // Root and containers
@@ -165,128 +161,116 @@ pub enum SyntaxKind {
 impl From<TokenKind> for SyntaxKind {
     fn from(kind: TokenKind) -> Self {
-        use TokenKind::*;
         match kind {
-            // Keywords -> Kw variants
-            Table => SyntaxKind::TableKw,
-            Chain => SyntaxKind::ChainKw,
-            Rule => SyntaxKind::RuleKw,
-            Set => SyntaxKind::SetKw,
-            Map => SyntaxKind::MapKw,
-            Element => SyntaxKind::ElementKw,
-            Include => SyntaxKind::IncludeKw,
-            Define => SyntaxKind::DefineKw,
-            Flush => SyntaxKind::FlushKw,
-            Add => SyntaxKind::AddKw,
-            Delete => SyntaxKind::DeleteKw,
-            Insert => SyntaxKind::InsertKw,
-            Replace => SyntaxKind::ReplaceKw,
-
-            // Chain types and hooks
-            Filter => SyntaxKind::FilterKw,
-            Nat => SyntaxKind::NatKw,
-            Route => SyntaxKind::RouteKw,
-            Input => SyntaxKind::InputKw,
-            Output => SyntaxKind::OutputKw,
-            Forward => SyntaxKind::ForwardKw,
-            Prerouting => SyntaxKind::PreroutingKw,
-            Postrouting => SyntaxKind::PostroutingKw,
-
-            // Protocols and families
-            Ip => SyntaxKind::IpKw,
-            Ip6 => SyntaxKind::Ip6Kw,
-            Inet => SyntaxKind::InetKw,
-            Arp => SyntaxKind::ArpKw,
-            Bridge => SyntaxKind::BridgeKw,
-            Netdev => SyntaxKind::NetdevKw,
-            Tcp => SyntaxKind::TcpKw,
-            Udp => SyntaxKind::UdpKw,
-            Icmp => SyntaxKind::IcmpKw,
-            Icmpv6 => SyntaxKind::Icmpv6Kw,
-
-            // Match keywords
-            Sport => SyntaxKind::SportKw,
-            Dport => SyntaxKind::DportKw,
-            Saddr => SyntaxKind::SaddrKw,
-            Daddr => SyntaxKind::DaddrKw,
-            Protocol => SyntaxKind::ProtocolKw,
-            Nexthdr => SyntaxKind::NexthdrKw,
-            Type => SyntaxKind::TypeKw,
-            Hook => SyntaxKind::HookKw,
-            Priority => SyntaxKind::PriorityKw,
-            Policy => SyntaxKind::PolicyKw,
-            Iifname => SyntaxKind::IifnameKw,
-            Oifname => SyntaxKind::OifnameKw,
-            Ct => SyntaxKind::CtKw,
-            State => SyntaxKind::StateKw,
-
-            // Actions
-            Accept => SyntaxKind::AcceptKw,
-            Drop => SyntaxKind::DropKw,
-            Reject => SyntaxKind::RejectKw,
-            Return => SyntaxKind::ReturnKw,
-            Jump => SyntaxKind::JumpKw,
-            Goto => SyntaxKind::GotoKw,
-            Continue => SyntaxKind::ContinueKw,
-            Log => SyntaxKind::LogKw,
-            Comment => SyntaxKind::CommentKw,
-
-            // States
-            Established => SyntaxKind::EstablishedKw,
-            Related => SyntaxKind::RelatedKw,
-            New => SyntaxKind::NewKw,
-            Invalid => SyntaxKind::InvalidKw,
-
-            // Additional protocol keywords
-            Vmap => SyntaxKind::VmapKw,
-            NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
-            NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
-            NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
-            EchoRequest => SyntaxKind::EchoRequestKw,
-            DestUnreachable => SyntaxKind::DestUnreachableKw,
-            RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
-            TimeExceeded => SyntaxKind::TimeExceededKw,
-            ParameterProblem => SyntaxKind::ParameterProblemKw,
-            PacketTooBig => SyntaxKind::PacketTooBigKw,
-
-            // Operators - direct mapping
-            Eq => SyntaxKind::EqOp,
-            Ne => SyntaxKind::NeOp,
-            Le => SyntaxKind::LeOp,
-            Ge => SyntaxKind::GeOp,
-            Lt => SyntaxKind::LtOp,
-            Gt => SyntaxKind::GtOp,
-
-            // Punctuation - direct mapping
-            LeftBrace => SyntaxKind::LeftBrace,
-            RightBrace => SyntaxKind::RightBrace,
-            LeftParen => SyntaxKind::LeftParen,
-            RightParen => SyntaxKind::RightParen,
-            LeftBracket => SyntaxKind::LeftBracket,
-            RightBracket => SyntaxKind::RightBracket,
-            Comma => SyntaxKind::Comma,
-            Semicolon => SyntaxKind::Semicolon,
-            Colon => SyntaxKind::Colon,
-            Assign => SyntaxKind::Assign,
-            Dash => SyntaxKind::Dash,
-            Slash => SyntaxKind::Slash,
-            Dot => SyntaxKind::Dot,
-
-            // Literals - map data-carrying variants to their types
-            StringLiteral(_) => SyntaxKind::StringLiteral,
-            NumberLiteral(_) => SyntaxKind::NumberLiteral,
-            IpAddress(_) => SyntaxKind::IpAddress,
-            Ipv6Address(_) => SyntaxKind::Ipv6Address,
-            MacAddress(_) => SyntaxKind::MacAddress,
-            Identifier(_) => SyntaxKind::Identifier,
-
-            // Special tokens
-            Newline => SyntaxKind::Newline,
-            CommentLine(_) => SyntaxKind::Comment,
-            Shebang(_) => SyntaxKind::Shebang,
-
-            // Error fallback
-            Error => SyntaxKind::Error,
+            TokenKind::Table => SyntaxKind::TableKw,
+            TokenKind::Chain => SyntaxKind::ChainKw,
+            TokenKind::Rule => SyntaxKind::RuleKw,
+            TokenKind::Set => SyntaxKind::SetKw,
+            TokenKind::Map => SyntaxKind::MapKw,
+            TokenKind::Element => SyntaxKind::ElementKw,
+            TokenKind::Include => SyntaxKind::IncludeKw,
+            TokenKind::Define => SyntaxKind::DefineKw,
+            TokenKind::Flush => SyntaxKind::FlushKw,
+            TokenKind::Add => SyntaxKind::AddKw,
+            TokenKind::Delete => SyntaxKind::DeleteKw,
+            TokenKind::Insert => SyntaxKind::InsertKw,
+            TokenKind::Replace => SyntaxKind::ReplaceKw,
+
+            TokenKind::Filter => SyntaxKind::FilterKw,
+            TokenKind::Nat => SyntaxKind::NatKw,
+            TokenKind::Route => SyntaxKind::RouteKw,
+
+            TokenKind::Input => SyntaxKind::InputKw,
+            TokenKind::Output => SyntaxKind::OutputKw,
+            TokenKind::Forward => SyntaxKind::ForwardKw,
+            TokenKind::Prerouting => SyntaxKind::PreroutingKw,
+            TokenKind::Postrouting => SyntaxKind::PostroutingKw,
+
+            TokenKind::Ip => SyntaxKind::IpKw,
+            TokenKind::Ip6 => SyntaxKind::Ip6Kw,
+            TokenKind::Inet => SyntaxKind::InetKw,
+            TokenKind::Arp => SyntaxKind::ArpKw,
+            TokenKind::Bridge => SyntaxKind::BridgeKw,
+            TokenKind::Netdev => SyntaxKind::NetdevKw,
+            TokenKind::Tcp => SyntaxKind::TcpKw,
+            TokenKind::Udp => SyntaxKind::UdpKw,
+            TokenKind::Icmp => SyntaxKind::IcmpKw,
+            TokenKind::Icmpv6 => SyntaxKind::Icmpv6Kw,
+
+            TokenKind::Sport => SyntaxKind::SportKw,
+            TokenKind::Dport => SyntaxKind::DportKw,
+            TokenKind::Saddr => SyntaxKind::SaddrKw,
+            TokenKind::Daddr => SyntaxKind::DaddrKw,
+            TokenKind::Protocol => SyntaxKind::ProtocolKw,
+            TokenKind::Nexthdr => SyntaxKind::NexthdrKw,
+            TokenKind::Type => SyntaxKind::TypeKw,
+            TokenKind::Hook => SyntaxKind::HookKw,
+            TokenKind::Priority => SyntaxKind::PriorityKw,
+            TokenKind::Policy => SyntaxKind::PolicyKw,
+            TokenKind::Iifname => SyntaxKind::IifnameKw,
+            TokenKind::Oifname => SyntaxKind::OifnameKw,
+            TokenKind::Ct => SyntaxKind::CtKw,
+            TokenKind::State => SyntaxKind::StateKw,
+
+            TokenKind::Accept => SyntaxKind::AcceptKw,
+            TokenKind::Drop => SyntaxKind::DropKw,
+            TokenKind::Reject => SyntaxKind::RejectKw,
+            TokenKind::Return => SyntaxKind::ReturnKw,
+            TokenKind::Jump => SyntaxKind::JumpKw,
+            TokenKind::Goto => SyntaxKind::GotoKw,
+            TokenKind::Continue => SyntaxKind::ContinueKw,
+            TokenKind::Log => SyntaxKind::LogKw,
+            TokenKind::Comment => SyntaxKind::CommentKw,
+
+            TokenKind::Established => SyntaxKind::EstablishedKw,
+            TokenKind::Related => SyntaxKind::RelatedKw,
+            TokenKind::New => SyntaxKind::NewKw,
+            TokenKind::Invalid => SyntaxKind::InvalidKw,
+
+            TokenKind::Vmap => SyntaxKind::VmapKw,
+            TokenKind::NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
+            TokenKind::NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
+            TokenKind::NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
+            TokenKind::EchoRequest => SyntaxKind::EchoRequestKw,
+            TokenKind::DestUnreachable => SyntaxKind::DestUnreachableKw,
+            TokenKind::RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
+            TokenKind::TimeExceeded => SyntaxKind::TimeExceededKw,
+            TokenKind::ParameterProblem => SyntaxKind::ParameterProblemKw,
+            TokenKind::PacketTooBig => SyntaxKind::PacketTooBigKw,
+
+            TokenKind::Eq => SyntaxKind::EqOp,
+            TokenKind::Ne => SyntaxKind::NeOp,
+            TokenKind::Le => SyntaxKind::LeOp,
+            TokenKind::Ge => SyntaxKind::GeOp,
+            TokenKind::Lt => SyntaxKind::LtOp,
+            TokenKind::Gt => SyntaxKind::GtOp,
+
+            TokenKind::LeftBrace => SyntaxKind::LeftBrace,
+            TokenKind::RightBrace => SyntaxKind::RightBrace,
+            TokenKind::LeftParen => SyntaxKind::LeftParen,
+            TokenKind::RightParen => SyntaxKind::RightParen,
+            TokenKind::LeftBracket => SyntaxKind::LeftBracket,
+            TokenKind::RightBracket => SyntaxKind::RightBracket,
+            TokenKind::Comma => SyntaxKind::Comma,
+            TokenKind::Semicolon => SyntaxKind::Semicolon,
+            TokenKind::Colon => SyntaxKind::Colon,
+            TokenKind::Assign => SyntaxKind::Assign,
+            TokenKind::Dash => SyntaxKind::Dash,
+            TokenKind::Slash => SyntaxKind::Slash,
+            TokenKind::Dot => SyntaxKind::Dot,
+
+            TokenKind::StringLiteral(_) => SyntaxKind::StringLiteral,
+            TokenKind::NumberLiteral(_) => SyntaxKind::NumberLiteral,
+            TokenKind::IpAddress(_) => SyntaxKind::IpAddress,
+            TokenKind::Ipv6Address(_) => SyntaxKind::Ipv6Address,
+            TokenKind::MacAddress(_) => SyntaxKind::MacAddress,
+            TokenKind::Identifier(_) => SyntaxKind::Identifier,
+
+            TokenKind::Newline => SyntaxKind::Newline,
+            TokenKind::CommentLine(_) => SyntaxKind::Comment,
+            TokenKind::Shebang(_) => SyntaxKind::Shebang,
+
+            TokenKind::Error => SyntaxKind::Error,
         }
     }
 }
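This whole hunk is a mechanical style change: the old side glob-imports the token variants (`use TokenKind::*;`) and groups the arms under comments, while the new side spells every arm out fully qualified. In miniature, with hypothetical two-variant enums (`Token` and `Kind` are stand-ins, not the crate's types):

```rust
enum Token {
    Table,
    Error,
}

enum Kind {
    TableKw,
    Error,
}

impl From<Token> for Kind {
    fn from(token: Token) -> Self {
        use Token::*; // old style: the glob import keeps each arm short
        match token {
            Table => Kind::TableKw,
            Error => Kind::Error, // new style would spell this Token::Error
        }
    }
}

fn main() {
    assert!(matches!(Kind::from(Token::Table), Kind::TableKw));
}
```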
@@ -340,7 +324,13 @@ impl SyntaxKind {
     }
 
     pub fn from_raw(raw: RawSyntaxKind) -> Self {
-        Self::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)
+        match raw.0 {
+            0 => SyntaxKind::Root,
+            1 => SyntaxKind::Table,
+            // ... other variants ...
+            116 => SyntaxKind::PacketTooBigKw,
+            _ => SyntaxKind::Error, // Fallback to Error for invalid values
+        }
     }
 }
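For context on the swap: the deleted line relied on num_enum's `TryFromPrimitive` derive plus `unwrap_or`, while the added lines hand-write the same fallback. A minimal runnable sketch of both strategies, using a hypothetical `MiniKind` in place of the real `SyntaxKind`:

```rust
// Hypothetical MiniKind stands in for SyntaxKind; not the crate's real enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u16)]
enum MiniKind {
    Root = 0,
    Table = 1,
    Error = 2,
}

impl MiniKind {
    // New style: hand-written match, unknown raw values fall back to Error.
    fn from_raw(raw: u16) -> Self {
        match raw {
            0 => MiniKind::Root,
            1 => MiniKind::Table,
            _ => MiniKind::Error,
        }
    }
}

fn main() {
    assert_eq!(MiniKind::from_raw(1), MiniKind::Table);
    assert_eq!(MiniKind::from_raw(999), MiniKind::Error);
    // Old style, with the removed num_enum dependency, derived the same thing:
    //   #[derive(TryFromPrimitive)] #[repr(u16)] enum SyntaxKind { ... }
    //   SyntaxKind::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)
}
```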
@@ -1247,7 +1237,7 @@ mod tests {
         let mut lexer = NftablesLexer::new(source);
         let tokens = lexer.tokenize().expect("Tokenization should succeed");
 
-        // Test CST construction with basic table syntax
+        // CST is now implemented - test that it works
         let green_tree = CstBuilder::build_tree(&tokens);
 
         // Verify the tree was created successfully
@@ -1264,88 +1254,4 @@ mod tests {
         let cst_result = CstBuilder::parse_to_cst(&tokens);
         assert!(cst_result.is_ok());
     }
-
-    #[test]
-    fn test_num_enum_improvements() {
-        // Test that from_raw uses num_enum for conversion
-        // Invalid values fall back to Error variant
-
-        // Test valid conversions
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(0)), SyntaxKind::Root);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1)), SyntaxKind::Table);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(25)), SyntaxKind::TableKw);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(106)), SyntaxKind::Error);
-        assert_eq!(
-            SyntaxKind::from_raw(RawSyntaxKind(116)),
-            SyntaxKind::PacketTooBigKw
-        );
-
-        // Test invalid values automatically fall back to Error
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(999)), SyntaxKind::Error);
-        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1000)), SyntaxKind::Error);
-
-        // Test bidirectional conversion
-        for variant in [
-            SyntaxKind::Root,
-            SyntaxKind::Table,
-            SyntaxKind::TableKw,
-            SyntaxKind::Error,
-            SyntaxKind::PacketTooBigKw,
-        ] {
-            let raw = variant.to_raw();
-            let converted_back = SyntaxKind::from_raw(raw);
-            assert_eq!(variant, converted_back);
-        }
-    }
-
-    #[test]
-    fn test_token_kind_conversion_improvements() {
-        // Test that From<TokenKind> conversion is complete and correct
-        use crate::lexer::TokenKind;
-
-        // Test keyword mappings
-        assert_eq!(SyntaxKind::from(TokenKind::Table), SyntaxKind::TableKw);
-        assert_eq!(SyntaxKind::from(TokenKind::Chain), SyntaxKind::ChainKw);
-        assert_eq!(SyntaxKind::from(TokenKind::Accept), SyntaxKind::AcceptKw);
-
-        // Test operators
-        assert_eq!(SyntaxKind::from(TokenKind::Eq), SyntaxKind::EqOp);
-        assert_eq!(SyntaxKind::from(TokenKind::Lt), SyntaxKind::LtOp);
-
-        // Test punctuation
-        assert_eq!(
-            SyntaxKind::from(TokenKind::LeftBrace),
-            SyntaxKind::LeftBrace
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::Semicolon),
-            SyntaxKind::Semicolon
-        );
-
-        // Test literals (with data)
-        assert_eq!(
-            SyntaxKind::from(TokenKind::StringLiteral("test".to_string())),
-            SyntaxKind::StringLiteral
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::NumberLiteral(42)),
-            SyntaxKind::NumberLiteral
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::IpAddress("192.168.1.1".to_string())),
-            SyntaxKind::IpAddress
-        );
-        assert_eq!(
-            SyntaxKind::from(TokenKind::Identifier("test".to_string())),
-            SyntaxKind::Identifier
-        );
-
-        // Test special tokens
-        assert_eq!(SyntaxKind::from(TokenKind::Newline), SyntaxKind::Newline);
-        assert_eq!(
-            SyntaxKind::from(TokenKind::CommentLine("# comment".to_string())),
-            SyntaxKind::Comment
-        );
-        assert_eq!(SyntaxKind::from(TokenKind::Error), SyntaxKind::Error);
-    }
 }

[diagnostics analyzer module (file name not shown)]

@@ -607,11 +607,6 @@ impl AnalyzerModule for StyleAnalyzer {
     fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
-        // Only perform style analysis if enabled
-        if !config.enable_style_warnings {
-            return diagnostics;
-        }
-
         // Check for missing shebang
         if !source.starts_with("#!") {
             let range = Range::new(Position::new(0, 0), Position::new(0, 0));
@@ -807,29 +802,14 @@ impl StyleAnalyzer {
 pub struct SemanticAnalyzer;
 
 impl AnalyzerModule for SemanticAnalyzer {
-    fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
+    fn analyze(&self, source: &str, _config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
-        // Always run semantic validation (syntax/semantic errors)
+        // Parse and validate nftables-specific constructs
         diagnostics.extend(self.validate_table_declarations(source));
-        diagnostics.extend(self.validate_chain_declarations_semantic(source));
+        diagnostics.extend(self.validate_chain_declarations(source));
         diagnostics.extend(self.validate_cidr_notation(source));
-
-        // Best practices checks (only if enabled)
-        if config.enable_best_practices {
-            diagnostics.extend(self.validate_chain_best_practices(source));
-            diagnostics.extend(self.check_for_redundant_rules(source));
-        }
-
-        // Performance hints (only if enabled)
-        if config.enable_performance_hints {
-            diagnostics.extend(self.check_performance_hints(source));
-        }
-
-        // Security warnings (only if enabled)
-        if config.enable_security_warnings {
-            diagnostics.extend(self.check_security_warnings(source));
-        }
+        diagnostics.extend(self.check_for_redundant_rules(source));
 
         diagnostics
     }
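The pattern being edited here: each analyzer implements a shared `AnalyzerModule` trait and receives a `DiagnosticConfig`; the old side gates whole check groups on config flags, the new side drops the gating (note the parameter renamed to `_config`). A simplified, self-contained sketch of that shape; all types below are stand-ins, not the crate's real definitions:

```rust
struct DiagnosticConfig {
    enable_style_warnings: bool,
}

#[derive(Debug)]
struct Diagnostic(String);

trait AnalyzerModule {
    fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic>;
}

struct StyleAnalyzer;

impl AnalyzerModule for StyleAnalyzer {
    fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
        let mut diagnostics = Vec::new();
        // Old side: bail out early when the config flag is off.
        // New side: this guard is gone and the checks always run.
        if !config.enable_style_warnings {
            return diagnostics;
        }
        if !source.starts_with("#!") {
            diagnostics.push(Diagnostic("missing shebang".into()));
        }
        diagnostics
    }
}

fn main() {
    let config = DiagnosticConfig { enable_style_warnings: true };
    let diags = StyleAnalyzer.analyze("table inet filter {}\n", &config);
    println!("{diags:?}"); // [Diagnostic("missing shebang")]
}
```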
@@ -900,7 +880,7 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    fn validate_chain_declarations_semantic(&self, source: &str) -> Vec<Diagnostic> {
+    fn validate_chain_declarations(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
         for (line_idx, line) in source.lines().enumerate() {
@@ -908,7 +888,7 @@ impl SemanticAnalyzer {
             let trimmed = line.trim();
 
             if trimmed.starts_with("type ") && trimmed.contains("hook") {
-                // Validate chain type and hook (semantic validation)
+                // Validate chain type and hook
                 if let Some(hook_pos) = trimmed.find("hook") {
                     let hook_part = &trimmed[hook_pos..];
                     let hook_words: Vec<&str> = hook_part.split_whitespace().collect();
@@ -936,20 +916,8 @@ impl SemanticAnalyzer {
                     }
                 }
             }
-        }
-        }
-
-        diagnostics
-    }
-
-    fn validate_chain_best_practices(&self, source: &str) -> Vec<Diagnostic> {
-        let mut diagnostics = Vec::new();
-
-        for (line_idx, line) in source.lines().enumerate() {
-            let line_num = line_idx as u32;
-            let trimmed = line.trim();
-            // Check for missing policy in filter chains (best practice)
+
+            // Check for missing policy in filter chains
             if trimmed.contains("type filter") && !trimmed.contains("policy") {
                 let range = Range::new(
                     Position::new(line_num, 0),
@@ -964,81 +932,6 @@ impl SemanticAnalyzer {
                 diagnostics.push(diagnostic);
             }
         }
-
-        diagnostics
-    }
-
-    fn check_performance_hints(&self, source: &str) -> Vec<Diagnostic> {
-        let mut diagnostics = Vec::new();
-
-        for (line_idx, line) in source.lines().enumerate() {
-            let line_num = line_idx as u32;
-            let trimmed = line.trim();
-
-            // Check for large sets without timeout (performance hint)
-            if trimmed.contains("set ") && trimmed.contains("{") && !trimmed.contains("timeout") {
-                // Simple heuristic: if set definition is long, suggest timeout
-                if trimmed.len() > 100 {
-                    let range = Range::new(
-                        Position::new(line_num, 0),
-                        Position::new(line_num, line.len() as u32),
-                    );
-                    let diagnostic = Diagnostic::new(
-                        range,
-                        DiagnosticSeverity::Hint,
-                        DiagnosticCode::LargeSetWithoutTimeout,
-                        "Consider adding a timeout to large sets for better performance"
-                            .to_string(),
-                    );
-                    diagnostics.push(diagnostic);
-                }
-            }
-
-            // Check for missing counters (performance hint)
-            if (trimmed.contains(" accept") || trimmed.contains(" drop"))
-                && !trimmed.contains("counter")
-            {
-                let range = Range::new(
-                    Position::new(line_num, 0),
-                    Position::new(line_num, line.len() as u32),
-                );
-                let diagnostic = Diagnostic::new(
-                    range,
-                    DiagnosticSeverity::Hint,
-                    DiagnosticCode::MissingCounters,
-                    "Consider adding counters to rules for monitoring and debugging".to_string(),
-                );
-                diagnostics.push(diagnostic);
-            }
-        }
-
-        diagnostics
-    }
-
-    fn check_security_warnings(&self, source: &str) -> Vec<Diagnostic> {
-        let mut diagnostics = Vec::new();
-
-        for (line_idx, line) in source.lines().enumerate() {
-            let line_num = line_idx as u32;
-            let trimmed = line.trim();
-
-            // Check for overly permissive rules (security warning)
-            if trimmed.contains(" accept")
-                && (trimmed.contains("0.0.0.0/0") || trimmed.contains("::/0"))
-            {
-                let range = Range::new(
-                    Position::new(line_num, 0),
-                    Position::new(line_num, line.len() as u32),
-                );
-                let diagnostic = Diagnostic::new(
-                    range,
-                    DiagnosticSeverity::Warning,
-                    DiagnosticCode::SecurityRisk,
-                    "Rule accepts traffic from anywhere - consider restricting source addresses"
-                        .to_string(),
-                );
-                diagnostics.push(diagnostic);
-            }
-        }
 
         diagnostics

src/main.rs

@@ -34,18 +34,24 @@ enum FormatterError {
         message: String,
         suggestion: Option<String>,
     },
+    #[error("Unsupported nftables syntax at line {line}, column {column}: {feature}")]
+    UnsupportedSyntax {
+        line: usize,
+        column: usize,
+        feature: String,
+        suggestion: Option<String>,
+    },
+    #[error("Invalid nftables syntax at line {line}, column {column}: {message}")]
+    InvalidSyntax {
+        line: usize,
+        column: usize,
+        message: String,
+        suggestion: Option<String>,
+    },
     #[error("IO error: {0}")]
     Io(#[from] io::Error),
 }
-
-#[derive(Error, Debug)]
-enum LintError {
-    #[error("Lint errors found in {file_count} file(s)")]
-    DiagnosticErrors { file_count: usize },
-    #[error("File discovery error: {0}")]
-    FileDiscovery(#[from] anyhow::Error),
-}
 
 #[derive(Parser, Debug, Clone)]
 #[command(
     name = "nff",
@@ -101,19 +107,19 @@ enum Commands {
         json: bool,
 
         /// Include style warnings in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
+        #[arg(long, default_value = "true")]
         style_warnings: bool,
 
         /// Include best practice recommendations in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
+        #[arg(long, default_value = "true")]
         best_practices: bool,
 
         /// Include performance hints in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
+        #[arg(long, default_value = "true")]
         performance_hints: bool,
 
         /// Include security warnings in diagnostics
-        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
+        #[arg(long, default_value = "true")]
         security_warnings: bool,
 
         /// Diagnostic modules to run (comma-separated: lexical,syntax,style,semantic)
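The dropped `action = clap::ArgAction::Set` is a behavioral change, not just noise: with `Set`, a `bool` flag takes an explicit value on the command line instead of acting as a bare switch. A hedged sketch of the old form, assuming clap 4's derive API:

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// With ArgAction::Set the flag takes an explicit value:
    ///   prog --style-warnings false
    /// Omitting the flag keeps the default of true.
    #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
    style_warnings: bool,
}

fn main() {
    let args = Args::parse();
    println!("style_warnings = {}", args.style_warnings);
}
```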
@@ -283,7 +289,7 @@ fn process_single_file_format(
         let mut parser = NftablesParser::new(tokens.clone());
         parser
             .parse()
-            .map_err(|e| convert_parse_error_to_formatter_error(&e, &source, &tokens))?
+            .map_err(|e| analyze_parse_error(&source, &tokens, &e.to_string()))?
     };
 
     if debug {
@@ -345,7 +351,7 @@ fn process_lint_command(
     };
 
     let is_multiple_files = files.len() > 1;
-    let mut error_file_count = 0;
+    let mut has_errors = false;
 
     for file_path in files {
         if let Err(e) = process_single_file_lint(
@@ -360,18 +366,16 @@ fn process_lint_command(
             is_multiple_files,
         ) {
             eprintln!("Error processing {}: {}", file_path, e);
-            error_file_count += 1;
+            has_errors = true;
             if !is_multiple_files {
                 return Err(e);
             }
         }
     }
 
-    if error_file_count > 0 {
-        return Err(LintError::DiagnosticErrors {
-            file_count: error_file_count,
-        }
-        .into());
+    // Exit with non-zero code if any file had errors
+    if has_errors {
+        std::process::exit(1);
     }
 
     Ok(())
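On the error-handling swap in this hunk: the old side counts failing files and returns a typed `LintError` for the caller to turn into an exit status, while the new side calls `std::process::exit(1)` on the spot, which ends the process immediately without unwinding the stack. A minimal sketch of the returned-error style (hypothetical `LintError` shape):

```rust
use std::process;

// Hypothetical error shape; the removed LintError carried a file_count.
#[derive(Debug)]
struct LintError {
    file_count: usize,
}

// Old style: return a typed error and let main() pick the exit code.
fn lint_all(failed_files: usize) -> Result<(), LintError> {
    if failed_files > 0 {
        return Err(LintError { file_count: failed_files });
    }
    Ok(())
}

fn main() {
    if let Err(e) = lint_all(2) {
        eprintln!("lint failed in {} file(s)", e.file_count);
        // The new style calls this deep inside process_lint_command instead.
        process::exit(1);
    }
}
```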
@@ -471,203 +475,77 @@ fn process_single_file_lint(
     Ok(())
 }
 
-/// Convert parser errors to formatter errors with proper location information
-fn convert_parse_error_to_formatter_error(
-    error: &crate::parser::ParseError,
-    source: &str,
-    tokens: &[Token],
-) -> FormatterError {
-    use crate::parser::ParseError;
-
-    match error {
-        ParseError::UnexpectedToken {
-            line,
-            column,
-            expected,
-            found,
-        } => FormatterError::SyntaxError {
-            line: *line,
-            column: *column,
-            message: format!("Expected {}, found '{}'", expected, found),
-            suggestion: None,
-        },
-        ParseError::MissingToken { expected } => {
-            // Try to find current position from last token
-            let (line, column) = if let Some(last_token) = tokens.last() {
-                position_from_range(&last_token.range, source)
-            } else {
-                (1, 1)
-            };
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: format!("Missing token: expected {}", expected),
-                suggestion: None,
-            }
-        }
-        ParseError::InvalidExpression { message } => {
-            // Try to find the current token position
-            let (line, column) = find_current_parse_position(tokens, source);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: format!("Invalid expression: {}", message),
-                suggestion: None,
-            }
-        }
-        ParseError::InvalidStatement { message } => {
-            let (line, column) = find_current_parse_position(tokens, source);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: format!("Invalid statement: {}", message),
-                suggestion: None,
-            }
-        }
-        ParseError::SemanticError { message } => {
-            let (line, column) = find_current_parse_position(tokens, source);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: format!("Semantic error: {}", message),
-                suggestion: None,
-            }
-        }
-        ParseError::LexError(lex_error) => {
-            // Convert lexical errors to formatter errors with location
-            convert_lex_error_to_formatter_error(lex_error, source)
-        }
-        ParseError::AnyhowError(anyhow_error) => {
-            // For anyhow errors, try to extract location from error message and context
-            let error_msg = anyhow_error.to_string();
-            let (line, column) = find_error_location_from_context(&error_msg, tokens, source);
-            let suggestion = generate_suggestion_for_error(&error_msg);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: error_msg,
-                suggestion,
-            }
-        }
-    }
-}
-
-/// Find the current parsing position from tokens
-fn find_current_parse_position(tokens: &[Token], source: &str) -> (usize, usize) {
-    // Look for the last non-whitespace, non-comment token
-    for token in tokens.iter().rev() {
-        match token.kind {
-            TokenKind::Newline | TokenKind::CommentLine(_) => continue,
-            _ => return position_from_range(&token.range, source),
-        }
-    }
-    (1, 1) // fallback
-}
-
-/// Convert lexical errors to formatter errors
-fn convert_lex_error_to_formatter_error(
-    lex_error: &crate::lexer::LexError,
-    source: &str,
-) -> FormatterError {
-    use crate::lexer::LexError;
-
-    match lex_error {
-        LexError::InvalidToken { position, text } => {
-            let (line, column) = offset_to_line_column(*position, source);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: format!("Invalid token: '{}'", text),
-                suggestion: None,
-            }
-        }
-        LexError::UnterminatedString { position } => {
-            let (line, column) = offset_to_line_column(*position, source);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: "Unterminated string literal".to_string(),
-                suggestion: Some("Add closing quote".to_string()),
-            }
-        }
-        LexError::InvalidNumber { position, text } => {
-            let (line, column) = offset_to_line_column(*position, source);
-            FormatterError::SyntaxError {
-                line,
-                column,
-                message: format!("Invalid number: '{}'", text),
-                suggestion: Some("Check number format".to_string()),
-            }
-        }
-    }
-}
-
-/// Convert byte offset to line/column position
-fn offset_to_line_column(offset: usize, source: &str) -> (usize, usize) {
-    let mut line = 1;
-    let mut column = 1;
-
-    for (i, ch) in source.char_indices() {
-        if i >= offset {
-            break;
-        }
-        if ch == '\n' {
-            line += 1;
-            column = 1;
-        } else {
-            column += 1;
-        }
-    }
-
-    (line, column)
-}
-
-/// Find error location from context clues in the error message
-fn find_error_location_from_context(
-    error_msg: &str,
-    tokens: &[Token],
-    source: &str,
-) -> (usize, usize) {
-    // Look for context clues in the error message
-    if error_msg.contains("Expected string or identifier, got:") {
-        // Find the problematic token mentioned in the error
-        if let Some(bad_token_text) = extract_token_from_error_message(error_msg) {
-            // Find this token in the token stream
-            for token in tokens {
-                if token.text == bad_token_text {
-                    return position_from_range(&token.range, source);
-                }
-            }
-        }
-    }
-
-    // Fallback to finding last meaningful token
-    find_current_parse_position(tokens, source)
-}
-
-/// Extract the problematic token from error message
-fn extract_token_from_error_message(error_msg: &str) -> Option<String> {
-    // Parse messages like "Expected string or identifier, got: {"
-    if let Some(got_part) = error_msg.split("got: ").nth(1) {
-        Some(got_part.trim().to_string())
-    } else {
-        None
-    }
-}
-
-/// Generate helpful suggestions based on error message
-fn generate_suggestion_for_error(error_msg: &str) -> Option<String> {
-    if error_msg.contains("Expected string or identifier") {
-        Some(
-            "Check if you're missing quotes around a string value or have an unexpected character"
-                .to_string(),
-        )
-    } else if error_msg.contains("Expected") && error_msg.contains("got:") {
-        Some("Check syntax and ensure proper nftables structure".to_string())
-    } else {
-        None
-    }
-}
+/// Intelligent error analysis to categorize parse errors and provide location information
+fn analyze_parse_error(source: &str, tokens: &[Token], error: &str) -> FormatterError {
+    // Convert line/column position from token ranges
+    let lines: Vec<&str> = source.lines().collect();
+
+    // Look for common error patterns and provide specific messages
+    if error.contains("unexpected token") || error.contains("expected") {
+        // Try to find the problematic token
+        if let Some(error_token) = find_error_token(tokens) {
+            let (line, column) = position_from_range(&error_token.range, source);
+
+            // Analyze the specific token to categorize the error
+            match categorize_syntax_error(&error_token, source, &lines) {
+                ErrorCategory::UnsupportedSyntax {
+                    feature,
+                    suggestion,
+                } => FormatterError::UnsupportedSyntax {
+                    line,
+                    column,
+                    feature,
+                    suggestion,
+                },
+                ErrorCategory::InvalidSyntax {
+                    message,
+                    suggestion,
+                } => FormatterError::InvalidSyntax {
+                    line,
+                    column,
+                    message,
+                    suggestion,
+                },
+                ErrorCategory::SyntaxError {
+                    message,
+                    suggestion,
+                } => FormatterError::SyntaxError {
+                    line,
+                    column,
+                    message,
+                    suggestion,
+                },
+            }
+        } else {
+            // Fallback to generic parse error
+            FormatterError::ParseError(error.to_string())
+        }
+    } else {
+        FormatterError::ParseError(error.to_string())
+    }
+}
+
+#[derive(Debug)]
+enum ErrorCategory {
+    UnsupportedSyntax {
+        feature: String,
+        suggestion: Option<String>,
+    },
+    InvalidSyntax {
+        message: String,
+        suggestion: Option<String>,
+    },
+    SyntaxError {
+        message: String,
+        suggestion: Option<String>,
+    },
+}
+
+/// Find the first error token in the token stream
+fn find_error_token(tokens: &[Token]) -> Option<&Token> {
+    tokens
+        .iter()
+        .find(|token| matches!(token.kind, TokenKind::Error))
+}
 
 /// Convert TextRange to line/column position
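Both the removed `offset_to_line_column` and the surviving `position_from_range` do the same walk from a byte offset to a 1-based (line, column) pair. A standalone copy of the removed helper, with the counting spot-checked:

```rust
// Standalone copy of the removed helper; 1-based line/column counting.
fn offset_to_line_column(offset: usize, source: &str) -> (usize, usize) {
    let mut line = 1;
    let mut column = 1;
    for (i, ch) in source.char_indices() {
        if i >= offset {
            break;
        }
        if ch == '\n' {
            line += 1;
            column = 1;
        } else {
            column += 1;
        }
    }
    (line, column)
}

fn main() {
    let src = "table inet filter {\n  chain input {\n}";
    assert_eq!(offset_to_line_column(0, src), (1, 1));
    assert_eq!(offset_to_line_column(20, src), (2, 1)); // first char after the newline
    assert_eq!(offset_to_line_column(24, src), (2, 5)); // the 'a' in "chain"
}
```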
@@ -688,6 +566,186 @@ fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize) {
     (1, 1) // fallback
 }
 
+/// Categorize syntax errors based on token content and context
+fn categorize_syntax_error(token: &Token, source: &str, lines: &[&str]) -> ErrorCategory {
+    let token_text = &token.text;
+    let (line_num, _) = position_from_range(&token.range, source);
+    let line_content = lines.get(line_num.saturating_sub(1)).unwrap_or(&"");
+
+    // Check for unsupported nftables features
+    if is_unsupported_feature(token_text, line_content) {
+        let (feature, suggestion) = classify_unsupported_feature(token_text, line_content);
+        return ErrorCategory::UnsupportedSyntax {
+            feature,
+            suggestion,
+        };
+    }
+
+    // Check for invalid but supported syntax
+    if is_invalid_syntax(token_text, line_content) {
+        let (message, suggestion) = classify_invalid_syntax(token_text, line_content);
+        return ErrorCategory::InvalidSyntax {
+            message,
+            suggestion,
+        };
+    }
+
+    // Default to syntax error
+    ErrorCategory::SyntaxError {
+        message: format!("Unexpected token '{}'", token_text),
+        suggestion: suggest_correction(token_text, line_content),
+    }
+}
+
+/// Check if the token represents an unsupported nftables feature
+fn is_unsupported_feature(token_text: &str, line_content: &str) -> bool {
+    // List of advanced nftables features that might not be fully supported yet
+    let unsupported_keywords = [
+        "quota", "limit", "counter", "meter", "socket", "fib", "rt", "ipsec", "tunnel", "comp",
+        "dccp", "sctp", "gre", "esp", "ah", "vlan", "arp", "rateest", "osf", "netdev", "meta",
+        "exthdr", "payload", "lookup", "dynset", "flow", "hash", "jhash", "symhash", "crc32",
+    ];
+
+    unsupported_keywords
+        .iter()
+        .any(|&keyword| token_text.contains(keyword) || line_content.contains(keyword))
+}
+
+/// Check if the syntax is invalid (malformed but within supported features)
+fn is_invalid_syntax(token_text: &str, line_content: &str) -> bool {
+    // Check for common syntax mistakes
+    if token_text.contains("..") || token_text.contains("::") {
+        return true; // Double operators usually indicate mistakes
+    }
+
+    // Check for malformed addresses or ranges
+    if token_text.contains("/") && !is_valid_cidr(token_text) {
+        return true;
+    }
+
+    // Check for malformed brackets/braces
+    let open_braces = line_content.matches('{').count();
+    let close_braces = line_content.matches('}').count();
+    if open_braces != close_braces {
+        return true;
+    }
+
+    false
+}
+
+/// Classify unsupported feature and provide suggestion
+fn classify_unsupported_feature(token_text: &str, line_content: &str) -> (String, Option<String>) {
+    let feature = if token_text.contains("quota") {
+        (
+            "quota management".to_string(),
+            Some("Use explicit rule counting instead".to_string()),
+        )
+    } else if token_text.contains("limit") {
+        (
+            "rate limiting".to_string(),
+            Some("Consider using simpler rule-based rate limiting".to_string()),
+        )
+    } else if token_text.contains("counter") {
+        (
+            "packet counters".to_string(),
+            Some("Use rule-level statistics instead".to_string()),
+        )
+    } else if line_content.contains("meta") {
+        (
+            "meta expressions".to_string(),
+            Some("Use explicit protocol matching instead".to_string()),
+        )
+    } else {
+        (format!("advanced feature '{}'", token_text), None)
+    };
+
+    feature
+}
+
+/// Classify invalid syntax and provide suggestion
+fn classify_invalid_syntax(token_text: &str, line_content: &str) -> (String, Option<String>) {
+    if token_text.contains("/") && !is_valid_cidr(token_text) {
+        return (
+            "Invalid CIDR notation".to_string(),
+            Some("Use format like '192.168.1.0/24' or '::1/128'".to_string()),
+        );
+    }
+
+    if token_text.contains("..") {
+        return (
+            "Invalid range operator".to_string(),
+            Some("Use '-' for ranges like '1000-2000'".to_string()),
+        );
+    }
+
+    if line_content.contains('{') && !line_content.contains('}') {
+        return (
+            "Unmatched opening brace".to_string(),
+            Some("Ensure all '{' have matching '}'".to_string()),
+        );
+    }
+
+    (
+        format!("Malformed token '{}'", token_text),
+        Some("Check nftables syntax documentation".to_string()),
+    )
+}
+
+/// Suggest correction for common typos
+fn suggest_correction(token_text: &str, line_content: &str) -> Option<String> {
+    // Common typos and their corrections
+    let corrections = [
+        ("tabel", "table"),
+        ("cahin", "chain"),
+        ("accpet", "accept"),
+        ("rejct", "reject"),
+        ("prtocol", "protocol"),
+        ("addres", "address"),
+        ("pririty", "priority"),
+        ("poicy", "policy"),
+    ];
+
+    for (typo, correction) in &corrections {
+        if token_text.contains(typo) {
+            return Some(format!("Did you mean '{}'?", correction));
+        }
+    }
+
+    // Context-based suggestions
+    if line_content.contains("type") && line_content.contains("hook") {
+        if !line_content.contains("filter")
+            && !line_content.contains("nat")
+            && !line_content.contains("route")
+        {
+            return Some("Chain type should be 'filter', 'nat', or 'route'".to_string());
+        }
+    }
+
+    None
+}
+
+/// Validate CIDR notation
+fn is_valid_cidr(text: &str) -> bool {
+    if let Some(slash_pos) = text.find('/') {
+        let (addr, prefix) = text.split_at(slash_pos);
+        let prefix = &prefix[1..]; // Remove the '/'
+
+        // Check if prefix is a valid number
+        if let Ok(prefix_len) = prefix.parse::<u8>() {
+            // Basic validation - IPv4 should be <= 32, IPv6 <= 128
+            if addr.contains(':') {
+                prefix_len <= 128 // IPv6
+            } else {
+                prefix_len <= 32 // IPv4
+            }
+        } else {
+            false
+        }
+    } else {
+        false
+    }
+}
 
 fn main() -> Result<()> {
     let args = Args::parse();
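A closing note on `is_valid_cidr` from the added block: it only bounds the prefix length (at most 32 for IPv4, 128 for IPv6) and never validates the address part, so a nonsense address with a plausible prefix still passes. A quick spot-check, with the function body copied verbatim:

```rust
// Function body copied from the diff; standalone for illustration.
fn is_valid_cidr(text: &str) -> bool {
    if let Some(slash_pos) = text.find('/') {
        let (addr, prefix) = text.split_at(slash_pos);
        let prefix = &prefix[1..]; // Remove the '/'
        if let Ok(prefix_len) = prefix.parse::<u8>() {
            if addr.contains(':') {
                prefix_len <= 128 // IPv6
            } else {
                prefix_len <= 32 // IPv4
            }
        } else {
            false
        }
    } else {
        false
    }
}

fn main() {
    assert!(is_valid_cidr("192.168.1.0/24"));
    assert!(is_valid_cidr("::1/128"));
    assert!(!is_valid_cidr("10.0.0.0/33")); // IPv4 prefix out of range
    assert!(!is_valid_cidr("10.0.0.0")); // no slash at all
    assert!(is_valid_cidr("999.999.999.999/24")); // address part is not checked
}
```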