diff --git a/modules/plugins/assistant/avante/avante-nvim.nix b/modules/plugins/assistant/avante/avante-nvim.nix index 7d52fab8..8dfa31fc 100644 --- a/modules/plugins/assistant/avante/avante-nvim.nix +++ b/modules/plugins/assistant/avante/avante-nvim.nix @@ -13,28 +13,44 @@ in { description = "The provider used in Aider mode or in the planning phase of Cursor Planning Mode."; }; - vendors = mkOption { + providers = mkOption { type = nullOr attrs; default = null; - description = "Define Your Custom providers."; + description = "Define settings for built-in and custom providers."; example = literalMD '' ```nix - ollama = { - __inherited_from = "openai"; - api_key_name = ""; - endpoint = "http://127.0.0.1:11434/v1"; - model = "qwen2.5u-coder:7b"; - max_tokens = 4096; - disable_tools = true; - }; - ollama_ds = { - __inherited_from = "openai"; - api_key_name = ""; - endpoint = "http://127.0.0.1:11434/v1"; - model = "deepseek-r1u:7b"; - max_tokens = 4096; - disable_tools = true; - }; + openai = { + endpoint = "https://api.openai.com/v1"; + model = "gpt-4o"; # your desired model (or gpt-4o-mini, o1, etc.) + timeout = 30000; # Timeout in milliseconds, increase this for reasoning models + extra_request_body = { + temperature = 0; + max_completion_tokens = 8192; # Increase this to include reasoning tokens (for reasoning models) + reasoning_effort = "medium"; # low|medium|high, only used for reasoning models + }; + }; + ollama = { + endpoint = "http://127.0.0.1:11434"; + timeout = 30000; # Timeout in milliseconds + extra_request_body = { + options = { + temperature = 0.75; + num_ctx = 20480; + keep_alive = "5m"; + }; + }; + }; + groq = { + __inherited_from = "openai"; + api_key_name = "GROQ_API_KEY"; + endpoint = "https://api.groq.com/openai/v1/"; + model = "llama-3.3-70b-versatile"; + disable_tools = true; + extra_request_body = { + temperature = 1; + max_tokens = 32768; # remember to increase this value, otherwise it will stop generating halfway + }; + }; ``` ''; };