Compare commits
5 commits: 92735f3eee ... b9d8cb6d5d

| Author | SHA1 | Date |
|---|---|---|
|  | b9d8cb6d5d |  |
|  | d1cad19fea |  |
|  | 8d7fcd6ef4 |  |
|  | 0eaa21a3ff |  |
|  | f7f77a7e19 |  |
5 changed files with 587 additions and 386 deletions

Cargo.lock (generated): 57 changed lines

@@ -281,6 +281,7 @@ dependencies = [
 "cstree",
 "glob",
 "logos",
+"num_enum",
 "regex",
 "serde",
 "serde_json",
@@ -288,6 +289,27 @@ dependencies = [
 "thiserror",
 ]

+[[package]]
+name = "num_enum"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
+dependencies = [
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
+dependencies = [
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "parking_lot"
 version = "0.12.3"
@@ -311,6 +333,15 @@ dependencies = [
 "windows-targets",
 ]

+[[package]]
+name = "proc-macro-crate"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
+dependencies = [
+ "toml_edit",
+]
+
 [[package]]
 name = "proc-macro2"
 version = "1.0.95"
@@ -487,6 +518,23 @@ dependencies = [
 "syn",
 ]

+[[package]]
+name = "toml_datetime"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"
+
+[[package]]
+name = "toml_edit"
+version = "0.22.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
+dependencies = [
+ "indexmap",
+ "toml_datetime",
+ "winnow",
+]
+
 [[package]]
 name = "triomphe"
 version = "0.1.14"
@@ -573,3 +621,12 @@ name = "windows_x86_64_msvc"
 version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
+
+[[package]]
+name = "winnow"
+version = "0.7.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec"
+dependencies = [
+ "memchr",
+]

Cargo.toml

@@ -14,5 +14,6 @@ cstree = "0.12"
 text-size = "1.1"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-regex = "1.11.1"
+regex = "1.11"
 glob = "0.3"
+num_enum = "0.7"

src/cst.rs: 308 changed lines

@@ -4,11 +4,15 @@
 
 use crate::lexer::{Token, TokenKind};
 use cstree::{RawSyntaxKind, green::GreenNode, util::NodeOrToken};
+use num_enum::{IntoPrimitive, TryFromPrimitive};
 use std::fmt;
 use thiserror::Error;
 
 /// nftables syntax node types
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+/// Uses `TryFromPrimitive` for safe conversion from raw values with fallback to `Error`.
+#[derive(
+    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TryFromPrimitive, IntoPrimitive,
+)]
 #[repr(u16)]
 pub enum SyntaxKind {
     // Root and containers
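
For readers unfamiliar with num_enum: the two new derives are what make the simplified `from_raw` further down possible. A minimal standalone sketch of the same pattern, on a hypothetical three-variant enum rather than the real `SyntaxKind`:

```rust
use num_enum::{IntoPrimitive, TryFromPrimitive};

// Hypothetical enum for illustration; SyntaxKind works the same way.
#[derive(Debug, Clone, Copy, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[repr(u16)]
enum Kind {
    Root = 0,
    Table = 1,
    Error = 2,
}

fn main() {
    // IntoPrimitive: enum -> u16 is infallible.
    let raw: u16 = Kind::Table.into();
    assert_eq!(raw, 1);

    // TryFromPrimitive: u16 -> enum is fallible; out-of-range values
    // come back as Err, which a caller can map to the Error variant.
    assert_eq!(Kind::try_from(1u16).ok(), Some(Kind::Table));
    assert_eq!(Kind::try_from(999u16).unwrap_or(Kind::Error), Kind::Error);
}
```

This is exactly the shape of the new `from_raw` body below: `Self::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)`.
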
@@ -161,116 +165,128 @@ pub enum SyntaxKind {
 
 impl From<TokenKind> for SyntaxKind {
     fn from(kind: TokenKind) -> Self {
+        use TokenKind::*;
         match kind {
-            TokenKind::Table => SyntaxKind::TableKw,
-            TokenKind::Chain => SyntaxKind::ChainKw,
-            TokenKind::Rule => SyntaxKind::RuleKw,
-            TokenKind::Set => SyntaxKind::SetKw,
-            TokenKind::Map => SyntaxKind::MapKw,
-            TokenKind::Element => SyntaxKind::ElementKw,
-            TokenKind::Include => SyntaxKind::IncludeKw,
-            TokenKind::Define => SyntaxKind::DefineKw,
-            TokenKind::Flush => SyntaxKind::FlushKw,
-            TokenKind::Add => SyntaxKind::AddKw,
-            TokenKind::Delete => SyntaxKind::DeleteKw,
-            TokenKind::Insert => SyntaxKind::InsertKw,
-            TokenKind::Replace => SyntaxKind::ReplaceKw,
+            // Keywords -> Kw variants
+            Table => SyntaxKind::TableKw,
+            Chain => SyntaxKind::ChainKw,
+            Rule => SyntaxKind::RuleKw,
+            Set => SyntaxKind::SetKw,
+            Map => SyntaxKind::MapKw,
+            Element => SyntaxKind::ElementKw,
+            Include => SyntaxKind::IncludeKw,
+            Define => SyntaxKind::DefineKw,
+            Flush => SyntaxKind::FlushKw,
+            Add => SyntaxKind::AddKw,
+            Delete => SyntaxKind::DeleteKw,
+            Insert => SyntaxKind::InsertKw,
+            Replace => SyntaxKind::ReplaceKw,
 
-            TokenKind::Filter => SyntaxKind::FilterKw,
-            TokenKind::Nat => SyntaxKind::NatKw,
-            TokenKind::Route => SyntaxKind::RouteKw,
-            TokenKind::Input => SyntaxKind::InputKw,
-            TokenKind::Output => SyntaxKind::OutputKw,
-            TokenKind::Forward => SyntaxKind::ForwardKw,
-            TokenKind::Prerouting => SyntaxKind::PreroutingKw,
-            TokenKind::Postrouting => SyntaxKind::PostroutingKw,
+            // Chain types and hooks
+            Filter => SyntaxKind::FilterKw,
+            Nat => SyntaxKind::NatKw,
+            Route => SyntaxKind::RouteKw,
+            Input => SyntaxKind::InputKw,
+            Output => SyntaxKind::OutputKw,
+            Forward => SyntaxKind::ForwardKw,
+            Prerouting => SyntaxKind::PreroutingKw,
+            Postrouting => SyntaxKind::PostroutingKw,
 
-            TokenKind::Ip => SyntaxKind::IpKw,
-            TokenKind::Ip6 => SyntaxKind::Ip6Kw,
-            TokenKind::Inet => SyntaxKind::InetKw,
-            TokenKind::Arp => SyntaxKind::ArpKw,
-            TokenKind::Bridge => SyntaxKind::BridgeKw,
-            TokenKind::Netdev => SyntaxKind::NetdevKw,
-            TokenKind::Tcp => SyntaxKind::TcpKw,
-            TokenKind::Udp => SyntaxKind::UdpKw,
-            TokenKind::Icmp => SyntaxKind::IcmpKw,
-            TokenKind::Icmpv6 => SyntaxKind::Icmpv6Kw,
+            // Protocols and families
+            Ip => SyntaxKind::IpKw,
+            Ip6 => SyntaxKind::Ip6Kw,
+            Inet => SyntaxKind::InetKw,
+            Arp => SyntaxKind::ArpKw,
+            Bridge => SyntaxKind::BridgeKw,
+            Netdev => SyntaxKind::NetdevKw,
+            Tcp => SyntaxKind::TcpKw,
+            Udp => SyntaxKind::UdpKw,
+            Icmp => SyntaxKind::IcmpKw,
+            Icmpv6 => SyntaxKind::Icmpv6Kw,
 
-            TokenKind::Sport => SyntaxKind::SportKw,
-            TokenKind::Dport => SyntaxKind::DportKw,
-            TokenKind::Saddr => SyntaxKind::SaddrKw,
-            TokenKind::Daddr => SyntaxKind::DaddrKw,
-            TokenKind::Protocol => SyntaxKind::ProtocolKw,
-            TokenKind::Nexthdr => SyntaxKind::NexthdrKw,
-            TokenKind::Type => SyntaxKind::TypeKw,
-            TokenKind::Hook => SyntaxKind::HookKw,
-            TokenKind::Priority => SyntaxKind::PriorityKw,
-            TokenKind::Policy => SyntaxKind::PolicyKw,
-            TokenKind::Iifname => SyntaxKind::IifnameKw,
-            TokenKind::Oifname => SyntaxKind::OifnameKw,
-            TokenKind::Ct => SyntaxKind::CtKw,
-            TokenKind::State => SyntaxKind::StateKw,
+            // Match keywords
+            Sport => SyntaxKind::SportKw,
+            Dport => SyntaxKind::DportKw,
+            Saddr => SyntaxKind::SaddrKw,
+            Daddr => SyntaxKind::DaddrKw,
+            Protocol => SyntaxKind::ProtocolKw,
+            Nexthdr => SyntaxKind::NexthdrKw,
+            Type => SyntaxKind::TypeKw,
+            Hook => SyntaxKind::HookKw,
+            Priority => SyntaxKind::PriorityKw,
+            Policy => SyntaxKind::PolicyKw,
+            Iifname => SyntaxKind::IifnameKw,
+            Oifname => SyntaxKind::OifnameKw,
+            Ct => SyntaxKind::CtKw,
+            State => SyntaxKind::StateKw,
 
-            TokenKind::Accept => SyntaxKind::AcceptKw,
-            TokenKind::Drop => SyntaxKind::DropKw,
-            TokenKind::Reject => SyntaxKind::RejectKw,
-            TokenKind::Return => SyntaxKind::ReturnKw,
-            TokenKind::Jump => SyntaxKind::JumpKw,
-            TokenKind::Goto => SyntaxKind::GotoKw,
-            TokenKind::Continue => SyntaxKind::ContinueKw,
-            TokenKind::Log => SyntaxKind::LogKw,
-            TokenKind::Comment => SyntaxKind::CommentKw,
+            // Actions
+            Accept => SyntaxKind::AcceptKw,
+            Drop => SyntaxKind::DropKw,
+            Reject => SyntaxKind::RejectKw,
+            Return => SyntaxKind::ReturnKw,
+            Jump => SyntaxKind::JumpKw,
+            Goto => SyntaxKind::GotoKw,
+            Continue => SyntaxKind::ContinueKw,
+            Log => SyntaxKind::LogKw,
+            Comment => SyntaxKind::CommentKw,
 
-            TokenKind::Established => SyntaxKind::EstablishedKw,
-            TokenKind::Related => SyntaxKind::RelatedKw,
-            TokenKind::New => SyntaxKind::NewKw,
-            TokenKind::Invalid => SyntaxKind::InvalidKw,
+            // States
+            Established => SyntaxKind::EstablishedKw,
+            Related => SyntaxKind::RelatedKw,
+            New => SyntaxKind::NewKw,
+            Invalid => SyntaxKind::InvalidKw,
 
-            TokenKind::Vmap => SyntaxKind::VmapKw,
-            TokenKind::NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
-            TokenKind::NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
-            TokenKind::NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
-            TokenKind::EchoRequest => SyntaxKind::EchoRequestKw,
-            TokenKind::DestUnreachable => SyntaxKind::DestUnreachableKw,
-            TokenKind::RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
-            TokenKind::TimeExceeded => SyntaxKind::TimeExceededKw,
-            TokenKind::ParameterProblem => SyntaxKind::ParameterProblemKw,
-            TokenKind::PacketTooBig => SyntaxKind::PacketTooBigKw,
+            // Additional protocol keywords
+            Vmap => SyntaxKind::VmapKw,
+            NdRouterAdvert => SyntaxKind::NdRouterAdvertKw,
+            NdNeighborSolicit => SyntaxKind::NdNeighborSolicitKw,
+            NdNeighborAdvert => SyntaxKind::NdNeighborAdvertKw,
+            EchoRequest => SyntaxKind::EchoRequestKw,
+            DestUnreachable => SyntaxKind::DestUnreachableKw,
+            RouterAdvertisement => SyntaxKind::RouterAdvertisementKw,
+            TimeExceeded => SyntaxKind::TimeExceededKw,
+            ParameterProblem => SyntaxKind::ParameterProblemKw,
+            PacketTooBig => SyntaxKind::PacketTooBigKw,
 
-            TokenKind::Eq => SyntaxKind::EqOp,
-            TokenKind::Ne => SyntaxKind::NeOp,
-            TokenKind::Le => SyntaxKind::LeOp,
-            TokenKind::Ge => SyntaxKind::GeOp,
-            TokenKind::Lt => SyntaxKind::LtOp,
-            TokenKind::Gt => SyntaxKind::GtOp,
+            // Operators - direct mapping
+            Eq => SyntaxKind::EqOp,
+            Ne => SyntaxKind::NeOp,
+            Le => SyntaxKind::LeOp,
+            Ge => SyntaxKind::GeOp,
+            Lt => SyntaxKind::LtOp,
+            Gt => SyntaxKind::GtOp,
 
-            TokenKind::LeftBrace => SyntaxKind::LeftBrace,
-            TokenKind::RightBrace => SyntaxKind::RightBrace,
-            TokenKind::LeftParen => SyntaxKind::LeftParen,
-            TokenKind::RightParen => SyntaxKind::RightParen,
-            TokenKind::LeftBracket => SyntaxKind::LeftBracket,
-            TokenKind::RightBracket => SyntaxKind::RightBracket,
-            TokenKind::Comma => SyntaxKind::Comma,
-            TokenKind::Semicolon => SyntaxKind::Semicolon,
-            TokenKind::Colon => SyntaxKind::Colon,
-            TokenKind::Assign => SyntaxKind::Assign,
-            TokenKind::Dash => SyntaxKind::Dash,
-            TokenKind::Slash => SyntaxKind::Slash,
-            TokenKind::Dot => SyntaxKind::Dot,
+            // Punctuation - direct mapping
+            LeftBrace => SyntaxKind::LeftBrace,
+            RightBrace => SyntaxKind::RightBrace,
+            LeftParen => SyntaxKind::LeftParen,
+            RightParen => SyntaxKind::RightParen,
+            LeftBracket => SyntaxKind::LeftBracket,
+            RightBracket => SyntaxKind::RightBracket,
+            Comma => SyntaxKind::Comma,
+            Semicolon => SyntaxKind::Semicolon,
+            Colon => SyntaxKind::Colon,
+            Assign => SyntaxKind::Assign,
+            Dash => SyntaxKind::Dash,
+            Slash => SyntaxKind::Slash,
+            Dot => SyntaxKind::Dot,
 
-            TokenKind::StringLiteral(_) => SyntaxKind::StringLiteral,
-            TokenKind::NumberLiteral(_) => SyntaxKind::NumberLiteral,
-            TokenKind::IpAddress(_) => SyntaxKind::IpAddress,
-            TokenKind::Ipv6Address(_) => SyntaxKind::Ipv6Address,
-            TokenKind::MacAddress(_) => SyntaxKind::MacAddress,
-            TokenKind::Identifier(_) => SyntaxKind::Identifier,
+            // Literals - map data-carrying variants to their types
+            StringLiteral(_) => SyntaxKind::StringLiteral,
+            NumberLiteral(_) => SyntaxKind::NumberLiteral,
+            IpAddress(_) => SyntaxKind::IpAddress,
+            Ipv6Address(_) => SyntaxKind::Ipv6Address,
+            MacAddress(_) => SyntaxKind::MacAddress,
+            Identifier(_) => SyntaxKind::Identifier,
 
-            TokenKind::Newline => SyntaxKind::Newline,
-            TokenKind::CommentLine(_) => SyntaxKind::Comment,
-            TokenKind::Shebang(_) => SyntaxKind::Shebang,
+            // Special tokens
+            Newline => SyntaxKind::Newline,
+            CommentLine(_) => SyntaxKind::Comment,
+            Shebang(_) => SyntaxKind::Shebang,
 
-            TokenKind::Error => SyntaxKind::Error,
+            // Error fallback
+            Error => SyntaxKind::Error,
         }
     }
 }
@@ -324,13 +340,7 @@ impl SyntaxKind {
     }
 
     pub fn from_raw(raw: RawSyntaxKind) -> Self {
-        match raw.0 {
-            0 => SyntaxKind::Root,
-            1 => SyntaxKind::Table,
-            // ... other variants ...
-            116 => SyntaxKind::PacketTooBigKw,
-            _ => SyntaxKind::Error, // Fallback to Error for invalid values
-        }
+        Self::try_from(raw.0 as u16).unwrap_or(SyntaxKind::Error)
     }
 }
@@ -1237,7 +1247,7 @@ mod tests {
         let mut lexer = NftablesLexer::new(source);
         let tokens = lexer.tokenize().expect("Tokenization should succeed");
 
-        // CST is now implemented - test that it works
+        // Test CST construction with basic table syntax
        let green_tree = CstBuilder::build_tree(&tokens);
 
         // Verify the tree was created successfully
@@ -1254,4 +1264,88 @@ mod tests {
         let cst_result = CstBuilder::parse_to_cst(&tokens);
         assert!(cst_result.is_ok());
     }
+
+    #[test]
+    fn test_num_enum_improvements() {
+        // Test that from_raw uses num_enum for conversion
+        // Invalid values fall back to Error variant
+
+        // Test valid conversions
+        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(0)), SyntaxKind::Root);
+        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1)), SyntaxKind::Table);
+        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(25)), SyntaxKind::TableKw);
+        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(106)), SyntaxKind::Error);
+        assert_eq!(
+            SyntaxKind::from_raw(RawSyntaxKind(116)),
+            SyntaxKind::PacketTooBigKw
+        );
+
+        // Test invalid values automatically fall back to Error
+        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(999)), SyntaxKind::Error);
+        assert_eq!(SyntaxKind::from_raw(RawSyntaxKind(1000)), SyntaxKind::Error);
+
+        // Test bidirectional conversion
+        for variant in [
+            SyntaxKind::Root,
+            SyntaxKind::Table,
+            SyntaxKind::TableKw,
+            SyntaxKind::Error,
+            SyntaxKind::PacketTooBigKw,
+        ] {
+            let raw = variant.to_raw();
+            let converted_back = SyntaxKind::from_raw(raw);
+            assert_eq!(variant, converted_back);
+        }
+    }
+
+    #[test]
+    fn test_token_kind_conversion_improvements() {
+        // Test that From<TokenKind> conversion is complete and correct
+        use crate::lexer::TokenKind;
+
+        // Test keyword mappings
+        assert_eq!(SyntaxKind::from(TokenKind::Table), SyntaxKind::TableKw);
+        assert_eq!(SyntaxKind::from(TokenKind::Chain), SyntaxKind::ChainKw);
+        assert_eq!(SyntaxKind::from(TokenKind::Accept), SyntaxKind::AcceptKw);
+
+        // Test operators
+        assert_eq!(SyntaxKind::from(TokenKind::Eq), SyntaxKind::EqOp);
+        assert_eq!(SyntaxKind::from(TokenKind::Lt), SyntaxKind::LtOp);
+
+        // Test punctuation
+        assert_eq!(
+            SyntaxKind::from(TokenKind::LeftBrace),
+            SyntaxKind::LeftBrace
+        );
+        assert_eq!(
+            SyntaxKind::from(TokenKind::Semicolon),
+            SyntaxKind::Semicolon
+        );
+
+        // Test literals (with data)
+        assert_eq!(
+            SyntaxKind::from(TokenKind::StringLiteral("test".to_string())),
+            SyntaxKind::StringLiteral
+        );
+        assert_eq!(
+            SyntaxKind::from(TokenKind::NumberLiteral(42)),
+            SyntaxKind::NumberLiteral
+        );
+        assert_eq!(
+            SyntaxKind::from(TokenKind::IpAddress("192.168.1.1".to_string())),
+            SyntaxKind::IpAddress
+        );
+        assert_eq!(
+            SyntaxKind::from(TokenKind::Identifier("test".to_string())),
+            SyntaxKind::Identifier
+        );
+
+        // Test special tokens
+        assert_eq!(SyntaxKind::from(TokenKind::Newline), SyntaxKind::Newline);
+        assert_eq!(
+            SyntaxKind::from(TokenKind::CommentLine("# comment".to_string())),
+            SyntaxKind::Comment
+        );
+        assert_eq!(SyntaxKind::from(TokenKind::Error), SyntaxKind::Error);
+    }
 }

@@ -607,6 +607,11 @@ impl AnalyzerModule for StyleAnalyzer {
     fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
+        // Only perform style analysis if enabled
+        if !config.enable_style_warnings {
+            return diagnostics;
+        }
+
         // Check for missing shebang
         if !source.starts_with("#!") {
             let range = Range::new(Position::new(0, 0), Position::new(0, 0));
@@ -802,14 +807,29 @@ impl StyleAnalyzer {
 pub struct SemanticAnalyzer;
 
 impl AnalyzerModule for SemanticAnalyzer {
-    fn analyze(&self, source: &str, _config: &DiagnosticConfig) -> Vec<Diagnostic> {
+    fn analyze(&self, source: &str, config: &DiagnosticConfig) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
-        // Parse and validate nftables-specific constructs
+        // Always run semantic validation (syntax/semantic errors)
         diagnostics.extend(self.validate_table_declarations(source));
-        diagnostics.extend(self.validate_chain_declarations(source));
+        diagnostics.extend(self.validate_chain_declarations_semantic(source));
         diagnostics.extend(self.validate_cidr_notation(source));
-        diagnostics.extend(self.check_for_redundant_rules(source));
+
+        // Best practices checks (only if enabled)
+        if config.enable_best_practices {
+            diagnostics.extend(self.validate_chain_best_practices(source));
+            diagnostics.extend(self.check_for_redundant_rules(source));
+        }
+
+        // Performance hints (only if enabled)
+        if config.enable_performance_hints {
+            diagnostics.extend(self.check_performance_hints(source));
+        }
+
+        // Security warnings (only if enabled)
+        if config.enable_security_warnings {
+            diagnostics.extend(self.check_security_warnings(source));
+        }
 
         diagnostics
     }
@@ -880,7 +900,7 @@ impl SemanticAnalyzer {
         diagnostics
     }
 
-    fn validate_chain_declarations(&self, source: &str) -> Vec<Diagnostic> {
+    fn validate_chain_declarations_semantic(&self, source: &str) -> Vec<Diagnostic> {
         let mut diagnostics = Vec::new();
 
         for (line_idx, line) in source.lines().enumerate() {
@@ -888,7 +908,7 @@ impl SemanticAnalyzer {
         let trimmed = line.trim();
 
         if trimmed.starts_with("type ") && trimmed.contains("hook") {
-            // Validate chain type and hook
+            // Validate chain type and hook (semantic validation)
             if let Some(hook_pos) = trimmed.find("hook") {
                 let hook_part = &trimmed[hook_pos..];
                 let hook_words: Vec<&str> = hook_part.split_whitespace().collect();
@@ -916,8 +936,20 @@ impl SemanticAnalyzer {
                 }
             }
         }
+            }
+        }
 
-        // Check for missing policy in filter chains
+        diagnostics
+    }
+
+    fn validate_chain_best_practices(&self, source: &str) -> Vec<Diagnostic> {
+        let mut diagnostics = Vec::new();
+
+        for (line_idx, line) in source.lines().enumerate() {
+            let line_num = line_idx as u32;
+            let trimmed = line.trim();
+
+            // Check for missing policy in filter chains (best practice)
             if trimmed.contains("type filter") && !trimmed.contains("policy") {
                 let range = Range::new(
                     Position::new(line_num, 0),
@@ -932,6 +964,81 @@
                 diagnostics.push(diagnostic);
             }
         }
+
+        diagnostics
+    }
+
+    fn check_performance_hints(&self, source: &str) -> Vec<Diagnostic> {
+        let mut diagnostics = Vec::new();
+
+        for (line_idx, line) in source.lines().enumerate() {
+            let line_num = line_idx as u32;
+            let trimmed = line.trim();
+
+            // Check for large sets without timeout (performance hint)
+            if trimmed.contains("set ") && trimmed.contains("{") && !trimmed.contains("timeout") {
+                // Simple heuristic: if set definition is long, suggest timeout
+                if trimmed.len() > 100 {
+                    let range = Range::new(
+                        Position::new(line_num, 0),
+                        Position::new(line_num, line.len() as u32),
+                    );
+                    let diagnostic = Diagnostic::new(
+                        range,
+                        DiagnosticSeverity::Hint,
+                        DiagnosticCode::LargeSetWithoutTimeout,
+                        "Consider adding a timeout to large sets for better performance"
+                            .to_string(),
+                    );
+                    diagnostics.push(diagnostic);
+                }
+            }
+
+            // Check for missing counters (performance hint)
+            if (trimmed.contains(" accept") || trimmed.contains(" drop"))
+                && !trimmed.contains("counter")
+            {
+                let range = Range::new(
+                    Position::new(line_num, 0),
+                    Position::new(line_num, line.len() as u32),
+                );
+                let diagnostic = Diagnostic::new(
+                    range,
+                    DiagnosticSeverity::Hint,
+                    DiagnosticCode::MissingCounters,
+                    "Consider adding counters to rules for monitoring and debugging".to_string(),
+                );
+                diagnostics.push(diagnostic);
+            }
+        }
+
+        diagnostics
+    }
+
+    fn check_security_warnings(&self, source: &str) -> Vec<Diagnostic> {
+        let mut diagnostics = Vec::new();
+
+        for (line_idx, line) in source.lines().enumerate() {
+            let line_num = line_idx as u32;
+            let trimmed = line.trim();
+
+            // Check for overly permissive rules (security warning)
+            if trimmed.contains(" accept")
+                && (trimmed.contains("0.0.0.0/0") || trimmed.contains("::/0"))
+            {
+                let range = Range::new(
+                    Position::new(line_num, 0),
+                    Position::new(line_num, line.len() as u32),
+                );
+                let diagnostic = Diagnostic::new(
+                    range,
+                    DiagnosticSeverity::Warning,
+                    DiagnosticCode::SecurityRisk,
+                    "Rule accepts traffic from anywhere - consider restricting source addresses"
+                        .to_string(),
+                );
+                diagnostics.push(diagnostic);
+            }
         }
 
         diagnostics
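
A rough usage sketch of the new gating, assuming the types shown above (`SemanticAnalyzer`, `AnalyzerModule`, `DiagnosticConfig`) are in scope and that `DiagnosticConfig` implements `Default`; that impl is an assumption, not something visible in this diff:

```rust
// Sketch only: drives check_security_warnings through the public analyze().
let analyzer = SemanticAnalyzer;
let config = DiagnosticConfig {
    enable_security_warnings: true,
    ..Default::default() // assumed Default impl
};

// A rule accepting traffic from anywhere should produce a SecurityRisk warning.
let source = "ip saddr 0.0.0.0/0 accept\n";
let diags = analyzer.analyze(source, &config);
assert!(!diags.is_empty());
```
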

src/main.rs: 464 changed lines

@@ -34,24 +34,18 @@ enum FormatterError {
         message: String,
         suggestion: Option<String>,
     },
-    #[error("Unsupported nftables syntax at line {line}, column {column}: {feature}")]
-    UnsupportedSyntax {
-        line: usize,
-        column: usize,
-        feature: String,
-        suggestion: Option<String>,
-    },
-    #[error("Invalid nftables syntax at line {line}, column {column}: {message}")]
-    InvalidSyntax {
-        line: usize,
-        column: usize,
-        message: String,
-        suggestion: Option<String>,
-    },
     #[error("IO error: {0}")]
     Io(#[from] io::Error),
 }
 
+#[derive(Error, Debug)]
+enum LintError {
+    #[error("Lint errors found in {file_count} file(s)")]
+    DiagnosticErrors { file_count: usize },
+    #[error("File discovery error: {0}")]
+    FileDiscovery(#[from] anyhow::Error),
+}
+
 #[derive(Parser, Debug, Clone)]
 #[command(
     name = "nff",
@@ -107,19 +101,19 @@ enum Commands {
         json: bool,
 
         /// Include style warnings in diagnostics
-        #[arg(long, default_value = "true")]
+        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
         style_warnings: bool,
 
         /// Include best practice recommendations in diagnostics
-        #[arg(long, default_value = "true")]
+        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
         best_practices: bool,
 
         /// Include performance hints in diagnostics
-        #[arg(long, default_value = "true")]
+        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
         performance_hints: bool,
 
         /// Include security warnings in diagnostics
-        #[arg(long, default_value = "true")]
+        #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
         security_warnings: bool,
 
         /// Diagnostic modules to run (comma-separated: lexical,syntax,style,semantic)
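
Context for the `action = clap::ArgAction::Set` change: in clap 4, a bare `bool` field derives to a `SetTrue` flag, so combined with `default_value = "true"` there was no way to turn these options off from the command line. With `ArgAction::Set` the flag takes an explicit value instead. A standalone sketch of the pattern (not the actual nff CLI definition):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Opts {
    /// With ArgAction::Set the flag takes a value, e.g. `--style-warnings false`.
    /// Without it, a bool field is a SetTrue flag that can never be disabled
    /// once its default is "true".
    #[arg(long, action = clap::ArgAction::Set, default_value = "true")]
    style_warnings: bool,
}

fn main() {
    let opts = Opts::parse();
    println!("style warnings enabled: {}", opts.style_warnings);
}
```
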
@@ -289,7 +283,7 @@ fn process_single_file_format(
     let mut parser = NftablesParser::new(tokens.clone());
     parser
         .parse()
-        .map_err(|e| analyze_parse_error(&source, &tokens, &e.to_string()))?
+        .map_err(|e| convert_parse_error_to_formatter_error(&e, &source, &tokens))?
     };
 
     if debug {
@@ -351,7 +345,7 @@ fn process_lint_command(
     };
 
     let is_multiple_files = files.len() > 1;
-    let mut has_errors = false;
+    let mut error_file_count = 0;
 
     for file_path in files {
         if let Err(e) = process_single_file_lint(
@@ -366,16 +360,18 @@ fn process_lint_command(
             is_multiple_files,
         ) {
             eprintln!("Error processing {}: {}", file_path, e);
-            has_errors = true;
+            error_file_count += 1;
             if !is_multiple_files {
                 return Err(e);
             }
         }
     }
 
-    // Exit with non-zero code if any file had errors
-    if has_errors {
-        std::process::exit(1);
+    if error_file_count > 0 {
+        return Err(LintError::DiagnosticErrors {
+            file_count: error_file_count,
+        }
+        .into());
     }
 
     Ok(())
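
Why replace `std::process::exit(1)` with a typed error: exiting from deep inside the command handler skips destructors and hides the failure from callers, while returning `LintError` lets the error bubble up to `main`, which (per `fn main() -> Result<()>` later in this diff) reports it and sets a nonzero exit status in one place. A self-contained sketch of the mechanism, assuming the thiserror and anyhow crates as used in this PR:

```rust
use thiserror::Error;

#[derive(Error, Debug)]
enum LintError {
    #[error("Lint errors found in {file_count} file(s)")]
    DiagnosticErrors { file_count: usize },
}

// When a Result-returning main propagates this error, the runtime prints it
// and exits with a nonzero status; no explicit process::exit is needed.
fn main() -> anyhow::Result<()> {
    Err(LintError::DiagnosticErrors { file_count: 2 }.into())
}
```
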
@@ -475,77 +471,203 @@ fn process_single_file_lint(
     Ok(())
 }
 
-/// Intelligent error analysis to categorize parse errors and provide location information
-fn analyze_parse_error(source: &str, tokens: &[Token], error: &str) -> FormatterError {
-    // Convert line/column position from token ranges
-    let lines: Vec<&str> = source.lines().collect();
-
-    // Look for common error patterns and provide specific messages
-    if error.contains("unexpected token") || error.contains("expected") {
-        // Try to find the problematic token
-        if let Some(error_token) = find_error_token(tokens) {
-            let (line, column) = position_from_range(&error_token.range, source);
-
-            // Analyze the specific token to categorize the error
-            match categorize_syntax_error(&error_token, source, &lines) {
-                ErrorCategory::UnsupportedSyntax {
-                    feature,
-                    suggestion,
-                } => FormatterError::UnsupportedSyntax {
-                    line,
-                    column,
-                    feature,
-                    suggestion,
-                },
-                ErrorCategory::InvalidSyntax {
-                    message,
-                    suggestion,
-                } => FormatterError::InvalidSyntax {
-                    line,
-                    column,
-                    message,
-                    suggestion,
-                },
-                ErrorCategory::SyntaxError {
-                    message,
-                    suggestion,
-                } => FormatterError::SyntaxError {
-                    line,
-                    column,
-                    message,
-                    suggestion,
-                },
-            }
-        } else {
-            // Fallback to generic parse error
-            FormatterError::ParseError(error.to_string())
-        }
-    } else {
-        FormatterError::ParseError(error.to_string())
-    }
-}
-
-#[derive(Debug)]
-enum ErrorCategory {
-    UnsupportedSyntax {
-        feature: String,
-        suggestion: Option<String>,
-    },
-    InvalidSyntax {
-        message: String,
-        suggestion: Option<String>,
-    },
-    SyntaxError {
-        message: String,
-        suggestion: Option<String>,
-    },
-}
-
-/// Find the first error token in the token stream
-fn find_error_token(tokens: &[Token]) -> Option<&Token> {
-    tokens
-        .iter()
-        .find(|token| matches!(token.kind, TokenKind::Error))
-}
+/// Convert parser errors to formatter errors with proper location information
+fn convert_parse_error_to_formatter_error(
+    error: &crate::parser::ParseError,
+    source: &str,
+    tokens: &[Token],
+) -> FormatterError {
+    use crate::parser::ParseError;
+
+    match error {
+        ParseError::UnexpectedToken {
+            line,
+            column,
+            expected,
+            found,
+        } => FormatterError::SyntaxError {
+            line: *line,
+            column: *column,
+            message: format!("Expected {}, found '{}'", expected, found),
+            suggestion: None,
+        },
+        ParseError::MissingToken { expected } => {
+            // Try to find current position from last token
+            let (line, column) = if let Some(last_token) = tokens.last() {
+                position_from_range(&last_token.range, source)
+            } else {
+                (1, 1)
+            };
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: format!("Missing token: expected {}", expected),
+                suggestion: None,
+            }
+        }
+        ParseError::InvalidExpression { message } => {
+            // Try to find the current token position
+            let (line, column) = find_current_parse_position(tokens, source);
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: format!("Invalid expression: {}", message),
+                suggestion: None,
+            }
+        }
+        ParseError::InvalidStatement { message } => {
+            let (line, column) = find_current_parse_position(tokens, source);
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: format!("Invalid statement: {}", message),
+                suggestion: None,
+            }
+        }
+        ParseError::SemanticError { message } => {
+            let (line, column) = find_current_parse_position(tokens, source);
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: format!("Semantic error: {}", message),
+                suggestion: None,
+            }
+        }
+        ParseError::LexError(lex_error) => {
+            // Convert lexical errors to formatter errors with location
+            convert_lex_error_to_formatter_error(lex_error, source)
+        }
+        ParseError::AnyhowError(anyhow_error) => {
+            // For anyhow errors, try to extract location from error message and context
+            let error_msg = anyhow_error.to_string();
+            let (line, column) = find_error_location_from_context(&error_msg, tokens, source);
+            let suggestion = generate_suggestion_for_error(&error_msg);
+
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: error_msg,
+                suggestion,
+            }
+        }
+    }
+}
+
+/// Find the current parsing position from tokens
+fn find_current_parse_position(tokens: &[Token], source: &str) -> (usize, usize) {
+    // Look for the last non-whitespace, non-comment token
+    for token in tokens.iter().rev() {
+        match token.kind {
+            TokenKind::Newline | TokenKind::CommentLine(_) => continue,
+            _ => return position_from_range(&token.range, source),
+        }
+    }
+    (1, 1) // fallback
+}
+
+/// Convert lexical errors to formatter errors
+fn convert_lex_error_to_formatter_error(
+    lex_error: &crate::lexer::LexError,
+    source: &str,
+) -> FormatterError {
+    use crate::lexer::LexError;
+
+    match lex_error {
+        LexError::InvalidToken { position, text } => {
+            let (line, column) = offset_to_line_column(*position, source);
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: format!("Invalid token: '{}'", text),
+                suggestion: None,
+            }
+        }
+        LexError::UnterminatedString { position } => {
+            let (line, column) = offset_to_line_column(*position, source);
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: "Unterminated string literal".to_string(),
+                suggestion: Some("Add closing quote".to_string()),
+            }
+        }
+        LexError::InvalidNumber { position, text } => {
+            let (line, column) = offset_to_line_column(*position, source);
+            FormatterError::SyntaxError {
+                line,
+                column,
+                message: format!("Invalid number: '{}'", text),
+                suggestion: Some("Check number format".to_string()),
+            }
+        }
+    }
+}
+
+/// Convert byte offset to line/column position
+fn offset_to_line_column(offset: usize, source: &str) -> (usize, usize) {
+    let mut line = 1;
+    let mut column = 1;
+
+    for (i, ch) in source.char_indices() {
+        if i >= offset {
+            break;
+        }
+        if ch == '\n' {
+            line += 1;
+            column = 1;
+        } else {
+            column += 1;
+        }
+    }
+
+    (line, column)
+}
+
+/// Find error location from context clues in the error message
+fn find_error_location_from_context(
+    error_msg: &str,
+    tokens: &[Token],
+    source: &str,
+) -> (usize, usize) {
+    // Look for context clues in the error message
+    if error_msg.contains("Expected string or identifier, got:") {
+        // Find the problematic token mentioned in the error
+        if let Some(bad_token_text) = extract_token_from_error_message(error_msg) {
+            // Find this token in the token stream
+            for token in tokens {
+                if token.text == bad_token_text {
+                    return position_from_range(&token.range, source);
+                }
+            }
+        }
+    }
+
+    // Fallback to finding last meaningful token
+    find_current_parse_position(tokens, source)
+}
+
+/// Extract the problematic token from error message
+fn extract_token_from_error_message(error_msg: &str) -> Option<String> {
+    // Parse messages like "Expected string or identifier, got: {"
+    if let Some(got_part) = error_msg.split("got: ").nth(1) {
+        Some(got_part.trim().to_string())
+    } else {
+        None
+    }
+}
+
+/// Generate helpful suggestions based on error message
+fn generate_suggestion_for_error(error_msg: &str) -> Option<String> {
+    if error_msg.contains("Expected string or identifier") {
+        Some(
+            "Check if you're missing quotes around a string value or have an unexpected character"
+                .to_string(),
+        )
+    } else if error_msg.contains("Expected") && error_msg.contains("got:") {
+        Some("Check syntax and ensure proper nftables structure".to_string())
+    } else {
+        None
+    }
+}
 
 /// Convert TextRange to line/column position
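
A quick worked example of `offset_to_line_column` as defined above, since the off-by-one conventions matter: lines and columns are 1-based, and the character at the given offset is not itself consumed:

```rust
// For source "ab\ncd": offsets are a=0, b=1, '\n'=2, c=3, d=4.
assert_eq!(offset_to_line_column(0, "ab\ncd"), (1, 1)); // at 'a'
assert_eq!(offset_to_line_column(1, "ab\ncd"), (1, 2)); // at 'b'
assert_eq!(offset_to_line_column(3, "ab\ncd"), (2, 1)); // at 'c'
assert_eq!(offset_to_line_column(4, "ab\ncd"), (2, 2)); // at 'd'
```
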
@@ -566,186 +688,6 @@ fn position_from_range(range: &text_size::TextRange, source: &str) -> (usize, usize) {
     (1, 1) // fallback
 }
 
-/// Categorize syntax errors based on token content and context
-fn categorize_syntax_error(token: &Token, source: &str, lines: &[&str]) -> ErrorCategory {
-    let token_text = &token.text;
-    let (line_num, _) = position_from_range(&token.range, source);
-    let line_content = lines.get(line_num.saturating_sub(1)).unwrap_or(&"");
-
-    // Check for unsupported nftables features
-    if is_unsupported_feature(token_text, line_content) {
-        let (feature, suggestion) = classify_unsupported_feature(token_text, line_content);
-        return ErrorCategory::UnsupportedSyntax {
-            feature,
-            suggestion,
-        };
-    }
-
-    // Check for invalid but supported syntax
-    if is_invalid_syntax(token_text, line_content) {
-        let (message, suggestion) = classify_invalid_syntax(token_text, line_content);
-        return ErrorCategory::InvalidSyntax {
-            message,
-            suggestion,
-        };
-    }
-
-    // Default to syntax error
-    ErrorCategory::SyntaxError {
-        message: format!("Unexpected token '{}'", token_text),
-        suggestion: suggest_correction(token_text, line_content),
-    }
-}
-
-/// Check if the token represents an unsupported nftables feature
-fn is_unsupported_feature(token_text: &str, line_content: &str) -> bool {
-    // List of advanced nftables features that might not be fully supported yet
-    let unsupported_keywords = [
-        "quota", "limit", "counter", "meter", "socket", "fib", "rt", "ipsec", "tunnel", "comp",
-        "dccp", "sctp", "gre", "esp", "ah", "vlan", "arp", "rateest", "osf", "netdev", "meta",
-        "exthdr", "payload", "lookup", "dynset", "flow", "hash", "jhash", "symhash", "crc32",
-    ];
-
-    unsupported_keywords
-        .iter()
-        .any(|&keyword| token_text.contains(keyword) || line_content.contains(keyword))
-}
-
-/// Check if the syntax is invalid (malformed but within supported features)
-fn is_invalid_syntax(token_text: &str, line_content: &str) -> bool {
-    // Check for common syntax mistakes
-    if token_text.contains("..") || token_text.contains("::") {
-        return true; // Double operators usually indicate mistakes
-    }
-
-    // Check for malformed addresses or ranges
-    if token_text.contains("/") && !is_valid_cidr(token_text) {
-        return true;
-    }
-
-    // Check for malformed brackets/braces
-    let open_braces = line_content.matches('{').count();
-    let close_braces = line_content.matches('}').count();
-    if open_braces != close_braces {
-        return true;
-    }
-
-    false
-}
-
-/// Classify unsupported feature and provide suggestion
-fn classify_unsupported_feature(token_text: &str, line_content: &str) -> (String, Option<String>) {
-    let feature = if token_text.contains("quota") {
-        (
-            "quota management".to_string(),
-            Some("Use explicit rule counting instead".to_string()),
-        )
-    } else if token_text.contains("limit") {
-        (
-            "rate limiting".to_string(),
-            Some("Consider using simpler rule-based rate limiting".to_string()),
-        )
-    } else if token_text.contains("counter") {
-        (
-            "packet counters".to_string(),
-            Some("Use rule-level statistics instead".to_string()),
-        )
-    } else if line_content.contains("meta") {
-        (
-            "meta expressions".to_string(),
-            Some("Use explicit protocol matching instead".to_string()),
-        )
-    } else {
-        (format!("advanced feature '{}'", token_text), None)
-    };
-
-    feature
-}
-
-/// Classify invalid syntax and provide suggestion
-fn classify_invalid_syntax(token_text: &str, line_content: &str) -> (String, Option<String>) {
-    if token_text.contains("/") && !is_valid_cidr(token_text) {
-        return (
-            "Invalid CIDR notation".to_string(),
-            Some("Use format like '192.168.1.0/24' or '::1/128'".to_string()),
-        );
-    }
-
-    if token_text.contains("..") {
-        return (
-            "Invalid range operator".to_string(),
-            Some("Use '-' for ranges like '1000-2000'".to_string()),
-        );
-    }
-
-    if line_content.contains('{') && !line_content.contains('}') {
-        return (
-            "Unmatched opening brace".to_string(),
-            Some("Ensure all '{' have matching '}'".to_string()),
-        );
-    }
-
-    (
-        format!("Malformed token '{}'", token_text),
-        Some("Check nftables syntax documentation".to_string()),
-    )
-}
-
-/// Suggest correction for common typos
-fn suggest_correction(token_text: &str, line_content: &str) -> Option<String> {
-    // Common typos and their corrections
-    let corrections = [
-        ("tabel", "table"),
-        ("cahin", "chain"),
-        ("accpet", "accept"),
-        ("rejct", "reject"),
-        ("prtocol", "protocol"),
-        ("addres", "address"),
-        ("pririty", "priority"),
-        ("poicy", "policy"),
-    ];
-
-    for (typo, correction) in &corrections {
-        if token_text.contains(typo) {
-            return Some(format!("Did you mean '{}'?", correction));
-        }
-    }
-
-    // Context-based suggestions
-    if line_content.contains("type") && line_content.contains("hook") {
-        if !line_content.contains("filter")
-            && !line_content.contains("nat")
-            && !line_content.contains("route")
-        {
-            return Some("Chain type should be 'filter', 'nat', or 'route'".to_string());
-        }
-    }
-
-    None
-}
-
-/// Validate CIDR notation
-fn is_valid_cidr(text: &str) -> bool {
-    if let Some(slash_pos) = text.find('/') {
-        let (addr, prefix) = text.split_at(slash_pos);
-        let prefix = &prefix[1..]; // Remove the '/'
-
-        // Check if prefix is a valid number
-        if let Ok(prefix_len) = prefix.parse::<u8>() {
-            // Basic validation - IPv4 should be <= 32, IPv6 <= 128
-            if addr.contains(':') {
-                prefix_len <= 128 // IPv6
-            } else {
-                prefix_len <= 32 // IPv4
-            }
-        } else {
-            false
-        }
-    } else {
-        false
-    }
-}
-
 fn main() -> Result<()> {
     let args = Args::parse();