pinakes-server: consolidate helpers for the tests

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ifbc07ced09014391bc264a36be27dc8c6a6a6964
This commit is contained in:
raf 2026-03-20 00:31:36 +03:00
commit 2f43279dd7
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
3 changed files with 343 additions and 413 deletions

View file

@@ -1,314 +1,12 @@
use std::{net::SocketAddr, sync::Arc}; mod common;
use axum::{ use axum::{
body::Body, body::Body,
extract::ConnectInfo,
http::{Request, StatusCode}, http::{Request, StatusCode},
}; };
use common::*;
use http_body_util::BodyExt; use http_body_util::BodyExt;
use pinakes_core::{
cache::CacheLayer,
config::{
AccountsConfig,
AnalyticsConfig,
CloudConfig,
Config,
DirectoryConfig,
EnrichmentConfig,
JobsConfig,
ManagedStorageConfig,
PhotoConfig,
PluginsConfig,
RateLimitConfig,
ScanningConfig,
ServerConfig,
SharingConfig,
SqliteConfig,
StorageBackendType,
StorageConfig,
SyncConfig,
ThumbnailConfig,
TlsConfig,
TranscodingConfig,
TrashConfig,
UiConfig,
UserAccount,
UserRole,
WebhookConfig,
},
jobs::JobQueue,
storage::{StorageBackend, sqlite::SqliteBackend},
};
use tokio::sync::RwLock;
use tower::ServiceExt; use tower::ServiceExt;
/// Fake socket address for tests (governor needs `ConnectInfo`<SocketAddr>)
fn test_addr() -> ConnectInfo<SocketAddr> {
ConnectInfo("127.0.0.1:9999".parse().unwrap())
}
/// Build a GET request with `ConnectInfo` for rate limiter compatibility
fn get(uri: &str) -> Request<Body> {
let mut req = Request::builder().uri(uri).body(Body::empty()).unwrap();
req.extensions_mut().insert(test_addr());
req
}
/// Build a POST request with `ConnectInfo`
fn post_json(uri: &str, body: &str) -> Request<Body> {
let mut req = Request::builder()
.method("POST")
.uri(uri)
.header("content-type", "application/json")
.body(Body::from(body.to_string()))
.unwrap();
req.extensions_mut().insert(test_addr());
req
}
/// Build a GET request with Bearer auth
fn get_authed(uri: &str, token: &str) -> Request<Body> {
let mut req = Request::builder()
.uri(uri)
.header("authorization", format!("Bearer {token}"))
.body(Body::empty())
.unwrap();
req.extensions_mut().insert(test_addr());
req
}
/// Build a POST JSON request with Bearer auth
fn post_json_authed(uri: &str, body: &str, token: &str) -> Request<Body> {
let mut req = Request::builder()
.method("POST")
.uri(uri)
.header("content-type", "application/json")
.header("authorization", format!("Bearer {token}"))
.body(Body::from(body.to_string()))
.unwrap();
req.extensions_mut().insert(test_addr());
req
}
/// Build a DELETE request with Bearer auth
fn delete_authed(uri: &str, token: &str) -> Request<Body> {
let mut req = Request::builder()
.method("DELETE")
.uri(uri)
.header("authorization", format!("Bearer {token}"))
.body(Body::empty())
.unwrap();
req.extensions_mut().insert(test_addr());
req
}
/// Build a PATCH JSON request with Bearer auth
fn patch_json_authed(uri: &str, body: &str, token: &str) -> Request<Body> {
let mut req = Request::builder()
.method("PATCH")
.uri(uri)
.header("content-type", "application/json")
.header("authorization", format!("Bearer {token}"))
.body(Body::from(body.to_string()))
.unwrap();
req.extensions_mut().insert(test_addr());
req
}
fn default_config() -> Config {
Config {
storage: StorageConfig {
backend: StorageBackendType::Sqlite,
sqlite: Some(SqliteConfig {
path: ":memory:".into(),
}),
postgres: None,
},
directories: DirectoryConfig { roots: vec![] },
scanning: ScanningConfig {
watch: false,
poll_interval_secs: 300,
ignore_patterns: vec![],
import_concurrency: 8,
},
server: ServerConfig {
host: "127.0.0.1".to_string(),
port: 3000,
api_key: None,
tls: TlsConfig::default(),
authentication_disabled: true,
cors_enabled: false,
cors_origins: vec![],
},
rate_limits: RateLimitConfig::default(),
ui: UiConfig::default(),
accounts: AccountsConfig::default(),
jobs: JobsConfig::default(),
thumbnails: ThumbnailConfig::default(),
webhooks: Vec::<WebhookConfig>::new(),
scheduled_tasks: vec![],
plugins: PluginsConfig::default(),
transcoding: TranscodingConfig::default(),
enrichment: EnrichmentConfig::default(),
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
trash: TrashConfig::default(),
}
}
async fn setup_app() -> axum::Router {
let backend = SqliteBackend::in_memory().expect("in-memory SQLite");
backend.run_migrations().await.expect("migrations");
let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend;
let config = default_config();
let job_queue =
JobQueue::new(1, 0, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
let config = Arc::new(RwLock::new(config));
let scheduler = pinakes_core::scheduler::TaskScheduler::new(
job_queue.clone(),
tokio_util::sync::CancellationToken::new(),
config.clone(),
None,
);
let state = pinakes_server::state::AppState {
storage,
config,
config_path: None,
scan_progress: pinakes_core::scan::ScanProgress::new(),
job_queue,
cache: Arc::new(CacheLayer::new(60)),
scheduler: Arc::new(scheduler),
plugin_manager: None,
plugin_pipeline: None,
transcode_service: None,
managed_storage: None,
chunked_upload_manager: None,
session_semaphore: Arc::new(tokio::sync::Semaphore::new(64)),
webhook_dispatcher: None,
};
pinakes_server::app::create_router(state, &RateLimitConfig::default())
}
/// Hash a password for test user accounts
fn hash_password(password: &str) -> String {
pinakes_core::users::auth::hash_password(password).unwrap()
}
/// Set up an app with accounts enabled and three pre-seeded users.
/// Returns (Router, `admin_token`, `editor_token`, `viewer_token`).
async fn setup_app_with_auth() -> (axum::Router, String, String, String) {
let backend = SqliteBackend::in_memory().expect("in-memory SQLite");
backend.run_migrations().await.expect("migrations");
let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend;
// Create users in database so resolve_user_id works
let users_to_create = vec![
("admin", "adminpass", UserRole::Admin),
("editor", "editorpass", UserRole::Editor),
("viewer", "viewerpass", UserRole::Viewer),
];
for (username, password, role) in &users_to_create {
let password_hash = hash_password(password);
storage
.create_user(username, &password_hash, *role, None)
.await
.expect("create user");
}
let mut config = default_config();
config.server.authentication_disabled = false; // Enable authentication for these tests
config.accounts.enabled = true;
config.accounts.users = vec![
UserAccount {
username: "admin".to_string(),
password_hash: hash_password("adminpass"),
role: UserRole::Admin,
},
UserAccount {
username: "editor".to_string(),
password_hash: hash_password("editorpass"),
role: UserRole::Editor,
},
UserAccount {
username: "viewer".to_string(),
password_hash: hash_password("viewerpass"),
role: UserRole::Viewer,
},
];
let job_queue =
JobQueue::new(1, 0, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
let config = Arc::new(RwLock::new(config));
let scheduler = pinakes_core::scheduler::TaskScheduler::new(
job_queue.clone(),
tokio_util::sync::CancellationToken::new(),
config.clone(),
None,
);
let state = pinakes_server::state::AppState {
storage,
config,
config_path: None,
scan_progress: pinakes_core::scan::ScanProgress::new(),
job_queue,
cache: Arc::new(CacheLayer::new(60)),
scheduler: Arc::new(scheduler),
plugin_manager: None,
plugin_pipeline: None,
transcode_service: None,
managed_storage: None,
chunked_upload_manager: None,
session_semaphore: Arc::new(tokio::sync::Semaphore::new(64)),
webhook_dispatcher: None,
};
let app =
pinakes_server::app::create_router(state, &RateLimitConfig::default());
// Login each user to get tokens
let admin_token = login_user(app.clone(), "admin", "adminpass").await;
let editor_token = login_user(app.clone(), "editor", "editorpass").await;
let viewer_token = login_user(app.clone(), "viewer", "viewerpass").await;
(app, admin_token, editor_token, viewer_token)
}
async fn login_user(
app: axum::Router,
username: &str,
password: &str,
) -> String {
let body = format!(r#"{{"username":"{username}","password":"{password}"}}"#);
let response = app
.oneshot(post_json("/api/v1/auth/login", &body))
.await
.unwrap();
assert_eq!(
response.status(),
StatusCode::OK,
"login failed for user {username}"
);
let body = response.into_body().collect().await.unwrap().to_bytes();
let result: serde_json::Value = serde_json::from_slice(&body).unwrap();
result["token"].as_str().unwrap().to_string()
}
async fn response_body(
response: axum::response::Response,
) -> serde_json::Value {
let body = response.into_body().collect().await.unwrap().to_bytes();
serde_json::from_slice(&body).unwrap_or(serde_json::Value::Null)
}
#[tokio::test] #[tokio::test]
async fn test_list_media_empty() { async fn test_list_media_empty() {
let app = setup_app().await; let app = setup_app().await;

View file

@@ -0,0 +1,323 @@
use std::{net::SocketAddr, sync::Arc};
use axum::{
body::Body,
extract::ConnectInfo,
http::{Request, StatusCode},
};
use http_body_util::BodyExt;
use pinakes_core::{
cache::CacheLayer,
config::{
AccountsConfig,
AnalyticsConfig,
CloudConfig,
Config,
DirectoryConfig,
EnrichmentConfig,
JobsConfig,
ManagedStorageConfig,
PhotoConfig,
PluginsConfig,
RateLimitConfig,
ScanningConfig,
ServerConfig,
SharingConfig,
SqliteConfig,
StorageBackendType,
StorageConfig,
SyncConfig,
ThumbnailConfig,
TlsConfig,
TranscodingConfig,
TrashConfig,
UiConfig,
UserAccount,
UserRole,
WebhookConfig,
},
jobs::JobQueue,
storage::{StorageBackend, sqlite::SqliteBackend},
};
use tokio::sync::RwLock;
use tower::ServiceExt;
/// Fixed client address injected into test requests (the governor
/// rate-limit layer requires a `ConnectInfo<SocketAddr>` extension).
pub fn test_addr() -> ConnectInfo<SocketAddr> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 9999));
    ConnectInfo(addr)
}
/// Build a GET request carrying the fake `ConnectInfo` so the rate
/// limiter middleware accepts it.
pub fn get(uri: &str) -> Request<Body> {
    let builder = Request::builder().uri(uri);
    let mut request = builder.body(Body::empty()).unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Build a JSON POST request carrying the fake `ConnectInfo`.
pub fn post_json(uri: &str, body: &str) -> Request<Body> {
    let payload = Body::from(body.to_string());
    let mut request = Request::builder()
        .method("POST")
        .uri(uri)
        .header("content-type", "application/json")
        .body(payload)
        .unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Build a GET request authenticated via a Bearer token.
pub fn get_authed(uri: &str, token: &str) -> Request<Body> {
    let bearer = format!("Bearer {token}");
    let mut request = Request::builder()
        .uri(uri)
        .header("authorization", bearer)
        .body(Body::empty())
        .unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Build a JSON POST request authenticated via a Bearer token.
pub fn post_json_authed(uri: &str, body: &str, token: &str) -> Request<Body> {
    let bearer = format!("Bearer {token}");
    let payload = Body::from(body.to_string());
    let mut request = Request::builder()
        .method("POST")
        .uri(uri)
        .header("content-type", "application/json")
        .header("authorization", bearer)
        .body(payload)
        .unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Build a JSON PUT request authenticated via a Bearer token.
pub fn put_json_authed(uri: &str, body: &str, token: &str) -> Request<Body> {
    let bearer = format!("Bearer {token}");
    let payload = Body::from(body.to_string());
    let mut request = Request::builder()
        .method("PUT")
        .uri(uri)
        .header("content-type", "application/json")
        .header("authorization", bearer)
        .body(payload)
        .unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Build a DELETE request authenticated via a Bearer token.
pub fn delete_authed(uri: &str, token: &str) -> Request<Body> {
    let bearer = format!("Bearer {token}");
    let mut request = Request::builder()
        .method("DELETE")
        .uri(uri)
        .header("authorization", bearer)
        .body(Body::empty())
        .unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Build a JSON PATCH request authenticated via a Bearer token.
pub fn patch_json_authed(uri: &str, body: &str, token: &str) -> Request<Body> {
    let bearer = format!("Bearer {token}");
    let payload = Body::from(body.to_string());
    let mut request = Request::builder()
        .method("PATCH")
        .uri(uri)
        .header("content-type", "application/json")
        .header("authorization", bearer)
        .body(payload)
        .unwrap();
    request.extensions_mut().insert(test_addr());
    request
}
/// Baseline `Config` shared by the test helpers: in-memory SQLite
/// storage, no scanned directories, authentication disabled, and every
/// optional subsystem left at its default.
pub fn default_config() -> Config {
    Config {
        storage: StorageConfig {
            backend: StorageBackendType::Sqlite,
            // ":memory:" keeps each test's database isolated and ephemeral.
            sqlite: Some(SqliteConfig {
                path: ":memory:".into(),
            }),
            postgres: None,
        },
        directories: DirectoryConfig { roots: vec![] },
        scanning: ScanningConfig {
            watch: false,
            poll_interval_secs: 300,
            ignore_patterns: vec![],
            import_concurrency: 8,
        },
        server: ServerConfig {
            host: "127.0.0.1".to_string(),
            port: 3000,
            api_key: None,
            tls: TlsConfig::default(),
            // Most tests hit endpoints unauthenticated; the auth-enabled
            // setup helper flips this back to false.
            authentication_disabled: true,
            cors_enabled: false,
            cors_origins: vec![],
        },
        rate_limits: RateLimitConfig::default(),
        ui: UiConfig::default(),
        accounts: AccountsConfig::default(),
        jobs: JobsConfig::default(),
        thumbnails: ThumbnailConfig::default(),
        webhooks: Vec::<WebhookConfig>::new(),
        scheduled_tasks: vec![],
        plugins: PluginsConfig::default(),
        transcoding: TranscodingConfig::default(),
        enrichment: EnrichmentConfig::default(),
        cloud: CloudConfig::default(),
        analytics: AnalyticsConfig::default(),
        photos: PhotoConfig::default(),
        managed_storage: ManagedStorageConfig::default(),
        sync: SyncConfig::default(),
        sharing: SharingConfig::default(),
        trash: TrashConfig::default(),
    }
}
/// Build a router backed by a fresh, migrated in-memory database using
/// the `default_config` baseline (authentication disabled).
pub async fn setup_app() -> axum::Router {
    let backend = SqliteBackend::in_memory().expect("in-memory SQLite");
    backend.run_migrations().await.expect("migrations");
    let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend;

    // Jobs run on a single worker whose handler is a no-op spawn; the
    // tests never execute real background work.
    let job_queue =
        JobQueue::new(1, 0, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
    let config = Arc::new(RwLock::new(default_config()));
    let scheduler = pinakes_core::scheduler::TaskScheduler::new(
        job_queue.clone(),
        tokio_util::sync::CancellationToken::new(),
        Arc::clone(&config),
        None,
    );

    let state = pinakes_server::state::AppState {
        storage,
        config,
        config_path: None,
        scan_progress: pinakes_core::scan::ScanProgress::new(),
        job_queue,
        cache: Arc::new(CacheLayer::new(60)),
        scheduler: Arc::new(scheduler),
        plugin_manager: None,
        plugin_pipeline: None,
        transcode_service: None,
        managed_storage: None,
        chunked_upload_manager: None,
        session_semaphore: Arc::new(tokio::sync::Semaphore::new(64)),
        webhook_dispatcher: None,
    };
    pinakes_server::app::create_router(state, &RateLimitConfig::default())
}
/// Hash a password for test user accounts, panicking on hasher failure
/// (acceptable in test-only code).
pub fn hash_password(password: &str) -> String {
    let hashed = pinakes_core::users::auth::hash_password(password);
    hashed.unwrap()
}
/// Set up an app with accounts enabled and three pre-seeded users.
/// Returns (Router, `admin_token`, `editor_token`, `viewer_token`).
///
/// Each password is hashed exactly once and the resulting hash is used
/// for both the database seed and the config account list. (The
/// previous version hashed every password twice — password hashing is
/// deliberately expensive, and the duplicated literals could drift.)
pub async fn setup_app_with_auth() -> (axum::Router, String, String, String) {
    let backend = SqliteBackend::in_memory().expect("in-memory SQLite");
    backend.run_migrations().await.expect("migrations");
    let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend;

    // Single source of truth for the seeded users.
    let users = [
        ("admin", "adminpass", UserRole::Admin),
        ("editor", "editorpass", UserRole::Editor),
        ("viewer", "viewerpass", UserRole::Viewer),
    ];
    let mut accounts = Vec::with_capacity(users.len());
    for (username, password, role) in &users {
        let password_hash = hash_password(password);
        // Create the user in the database so resolve_user_id works.
        storage
            .create_user(username, &password_hash, *role, None)
            .await
            .expect("create user");
        accounts.push(UserAccount {
            username: (*username).to_string(),
            password_hash,
            role: *role,
        });
    }

    let mut config = default_config();
    config.server.authentication_disabled = false; // Enable authentication for these tests
    config.accounts.enabled = true;
    config.accounts.users = accounts;

    let job_queue =
        JobQueue::new(1, 0, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
    let config = Arc::new(RwLock::new(config));
    let scheduler = pinakes_core::scheduler::TaskScheduler::new(
        job_queue.clone(),
        tokio_util::sync::CancellationToken::new(),
        config.clone(),
        None,
    );
    let state = pinakes_server::state::AppState {
        storage,
        config,
        config_path: None,
        scan_progress: pinakes_core::scan::ScanProgress::new(),
        job_queue,
        cache: Arc::new(CacheLayer::new(60)),
        scheduler: Arc::new(scheduler),
        plugin_manager: None,
        plugin_pipeline: None,
        transcode_service: None,
        managed_storage: None,
        chunked_upload_manager: None,
        session_semaphore: Arc::new(tokio::sync::Semaphore::new(64)),
        webhook_dispatcher: None,
    };
    let app =
        pinakes_server::app::create_router(state, &RateLimitConfig::default());

    // Log each user in to obtain their bearer tokens.
    let admin_token = login_user(app.clone(), "admin", "adminpass").await;
    let editor_token = login_user(app.clone(), "editor", "editorpass").await;
    let viewer_token = login_user(app.clone(), "viewer", "viewerpass").await;
    (app, admin_token, editor_token, viewer_token)
}
/// Log a user in via the auth endpoint and return the bearer token,
/// asserting that the login succeeded.
pub async fn login_user(
    app: axum::Router,
    username: &str,
    password: &str,
) -> String {
    let credentials =
        format!(r#"{{"username":"{username}","password":"{password}"}}"#);
    let response = app
        .oneshot(post_json("/api/v1/auth/login", &credentials))
        .await
        .unwrap();
    assert_eq!(
        response.status(),
        StatusCode::OK,
        "login failed for user {username}"
    );
    let bytes = response.into_body().collect().await.unwrap().to_bytes();
    let json: serde_json::Value = serde_json::from_slice(&bytes).unwrap();
    json["token"].as_str().unwrap().to_string()
}
/// Collect a response body and parse it as JSON, yielding `Null` when
/// the body is not valid JSON (e.g. an empty body).
pub async fn response_body(
    response: axum::response::Response,
) -> serde_json::Value {
    let bytes = response.into_body().collect().await.unwrap().to_bytes();
    match serde_json::from_slice(&bytes) {
        Ok(value) => value,
        Err(_) => serde_json::Value::Null,
    }
}

View file

@@ -1,66 +1,26 @@
use std::{net::SocketAddr, sync::Arc}; mod common;
use std::sync::Arc;
use axum::{ use axum::{body::Body, http::StatusCode};
body::Body, use common::*;
extract::ConnectInfo,
http::{Request, StatusCode},
};
use http_body_util::BodyExt; use http_body_util::BodyExt;
use pinakes_core::{ use pinakes_core::{config::PluginsConfig, plugin::PluginManager};
cache::CacheLayer,
config::{
AccountsConfig,
AnalyticsConfig,
CloudConfig,
Config,
DirectoryConfig,
EnrichmentConfig,
JobsConfig,
ManagedStorageConfig,
PhotoConfig,
PluginsConfig,
RateLimitConfig,
ScanningConfig,
ServerConfig,
SharingConfig,
SqliteConfig,
StorageBackendType,
StorageConfig,
SyncConfig,
ThumbnailConfig,
TlsConfig,
TranscodingConfig,
TrashConfig,
UiConfig,
WebhookConfig,
},
jobs::JobQueue,
plugin::PluginManager,
storage::{StorageBackend, sqlite::SqliteBackend},
};
use tokio::sync::RwLock;
use tower::ServiceExt; use tower::ServiceExt;
/// Fake socket address for tests (governor needs `ConnectInfo`<SocketAddr>)
fn test_addr() -> ConnectInfo<SocketAddr> {
ConnectInfo("127.0.0.1:9999".parse().unwrap())
}
/// Build a GET request with `ConnectInfo` for rate limiter compatibility
fn get(uri: &str) -> Request<Body> {
let mut req = Request::builder().uri(uri).body(Body::empty()).unwrap();
req.extensions_mut().insert(test_addr());
req
}
async fn setup_app_with_plugins() async fn setup_app_with_plugins()
-> (axum::Router, Arc<PluginManager>, tempfile::TempDir) { -> (axum::Router, Arc<PluginManager>, tempfile::TempDir) {
use pinakes_core::{
cache::CacheLayer,
config::RateLimitConfig,
jobs::JobQueue,
storage::{StorageBackend, sqlite::SqliteBackend},
};
use tokio::sync::RwLock;
let backend = SqliteBackend::in_memory().expect("in-memory SQLite"); let backend = SqliteBackend::in_memory().expect("in-memory SQLite");
backend.run_migrations().await.expect("migrations"); backend.run_migrations().await.expect("migrations");
let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend; let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend;
// Create temp directories for plugin manager (automatically cleaned up when
// TempDir drops)
let temp_dir = tempfile::TempDir::new().expect("create temp dir"); let temp_dir = tempfile::TempDir::new().expect("create temp dir");
let data_dir = temp_dir.path().join("data"); let data_dir = temp_dir.path().join("data");
let cache_dir = temp_dir.path().join("cache"); let cache_dir = temp_dir.path().join("cache");
@ -87,48 +47,8 @@ async fn setup_app_with_plugins()
.expect("create plugin manager"); .expect("create plugin manager");
let plugin_manager = Arc::new(plugin_manager); let plugin_manager = Arc::new(plugin_manager);
let config = Config { let mut config = default_config();
storage: StorageConfig { config.plugins = plugin_config;
backend: StorageBackendType::Sqlite,
sqlite: Some(SqliteConfig {
path: ":memory:".into(),
}),
postgres: None,
},
directories: DirectoryConfig { roots: vec![] },
scanning: ScanningConfig {
watch: false,
poll_interval_secs: 300,
ignore_patterns: vec![],
import_concurrency: 8,
},
server: ServerConfig {
host: "127.0.0.1".to_string(),
port: 3000,
api_key: None,
tls: TlsConfig::default(),
authentication_disabled: true,
cors_enabled: false,
cors_origins: vec![],
},
rate_limits: RateLimitConfig::default(),
ui: UiConfig::default(),
accounts: AccountsConfig::default(),
jobs: JobsConfig::default(),
thumbnails: ThumbnailConfig::default(),
webhooks: Vec::<WebhookConfig>::new(),
scheduled_tasks: vec![],
plugins: plugin_config,
transcoding: TranscodingConfig::default(),
enrichment: EnrichmentConfig::default(),
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
trash: TrashConfig::default(),
};
let job_queue = let job_queue =
JobQueue::new(1, 0, |_id, _kind, _cancel, _jobs| tokio::spawn(async {})); JobQueue::new(1, 0, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
@ -178,11 +98,9 @@ async fn test_list_plugins_empty() {
async fn test_plugin_manager_exists() { async fn test_plugin_manager_exists() {
let (app, _pm, _tmp) = setup_app_with_plugins().await; let (app, _pm, _tmp) = setup_app_with_plugins().await;
// Verify plugin manager is accessible
let plugins = _pm.list_plugins().await; let plugins = _pm.list_plugins().await;
assert_eq!(plugins.len(), 0); assert_eq!(plugins.len(), 0);
// Verify API endpoint works
let response = app.oneshot(get("/api/v1/plugins")).await.unwrap(); let response = app.oneshot(get("/api/v1/plugins")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK); assert_eq!(response.status(), StatusCode::OK);
} }
@ -203,14 +121,9 @@ async fn test_plugin_not_found() {
async fn test_plugin_enable_disable() { async fn test_plugin_enable_disable() {
let (app, pm, _tmp) = setup_app_with_plugins().await; let (app, pm, _tmp) = setup_app_with_plugins().await;
// Verify plugin manager is initialized
assert!(pm.list_plugins().await.is_empty()); assert!(pm.list_plugins().await.is_empty());
// For this test, we would need to actually load a plugin first let mut req = axum::http::Request::builder()
// Since we don't have a real WASM plugin loaded, we'll just verify
// the endpoints exist and return appropriate errors
let mut req = Request::builder()
.method("POST") .method("POST")
.uri("/api/v1/plugins/test-plugin/enable") .uri("/api/v1/plugins/test-plugin/enable")
.body(Body::empty()) .body(Body::empty())
@ -219,11 +132,9 @@ async fn test_plugin_enable_disable() {
let response = app.clone().oneshot(req).await.unwrap(); let response = app.clone().oneshot(req).await.unwrap();
// Should be NOT_FOUND since plugin doesn't exist
assert_eq!(response.status(), StatusCode::NOT_FOUND); assert_eq!(response.status(), StatusCode::NOT_FOUND);
// Test disable endpoint let mut req = axum::http::Request::builder()
let mut req = Request::builder()
.method("POST") .method("POST")
.uri("/api/v1/plugins/test-plugin/disable") .uri("/api/v1/plugins/test-plugin/disable")
.body(Body::empty()) .body(Body::empty())
@ -232,7 +143,6 @@ async fn test_plugin_enable_disable() {
let response = app.oneshot(req).await.unwrap(); let response = app.oneshot(req).await.unwrap();
// Should also be NOT_FOUND
assert_eq!(response.status(), StatusCode::NOT_FOUND); assert_eq!(response.status(), StatusCode::NOT_FOUND);
} }
@ -240,7 +150,7 @@ async fn test_plugin_enable_disable() {
async fn test_plugin_uninstall_not_found() { async fn test_plugin_uninstall_not_found() {
let (app, _pm, _tmp) = setup_app_with_plugins().await; let (app, _pm, _tmp) = setup_app_with_plugins().await;
let mut req = Request::builder() let mut req = axum::http::Request::builder()
.method("DELETE") .method("DELETE")
.uri("/api/v1/plugins/nonexistent") .uri("/api/v1/plugins/nonexistent")
.body(Body::empty()) .body(Body::empty())
@ -249,7 +159,6 @@ async fn test_plugin_uninstall_not_found() {
let response = app.oneshot(req).await.unwrap(); let response = app.oneshot(req).await.unwrap();
// Expect 400 or 404 when plugin doesn't exist
assert!( assert!(
response.status() == StatusCode::BAD_REQUEST response.status() == StatusCode::BAD_REQUEST
|| response.status() == StatusCode::NOT_FOUND || response.status() == StatusCode::NOT_FOUND