From 2cc4ed0d18ad98df5ef78b1aafbf328bec75d0ee Mon Sep 17 00:00:00 2001
From: Marcos
Date: Sun, 22 Mar 2026 16:40:27 -0300
Subject: [PATCH] Fix Ollama endpoint: use http://ollama:11434

---
 ai_agent.py      | 2 +-
 llm_providers.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ai_agent.py b/ai_agent.py
index ae0d76f..cbb7f71 100644
--- a/ai_agent.py
+++ b/ai_agent.py
@@ -23,7 +23,7 @@ def get_llm_response(prompt: str, provider: str, cfg: dict) -> str:
             return f"Erro de Conexão Gemini: {str(e)}"
 
     elif provider == "ollama":
-        ollama_host = os.getenv("OLLAMA_HOST", "http://ollama-lw4s8g4gc8gss4gkc4gg0wk4:11434")
+        ollama_host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
         try:
             res = requests.post(f"{ollama_host}/api/generate", json={
                 "model": os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b"),
diff --git a/llm_providers.py b/llm_providers.py
index e0e0e7c..a1fb928 100644
--- a/llm_providers.py
+++ b/llm_providers.py
@@ -37,7 +37,7 @@ LLM_PROVIDERS = {
     "ollama": {
         "name": "Ollama (Local)",
         "type": "local",
-        "endpoint": os.getenv("OLLAMA_HOST", "http://ollama-lw4s8g4gc8gss4gkc4gg0wk4:11434"),
+        "endpoint": os.getenv("OLLAMA_HOST", "http://ollama:11434"),
        "models": None,
         "default": "qwen2.5-coder:1.5b"
     }
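
A quick way to verify the new default endpoint after applying this patch is a
standalone probe script, run from a container on the same network as the
`ollama` service. This is a minimal sketch, not part of the patch: it resolves
the host exactly as the patched code does (OLLAMA_HOST env var, falling back to
http://ollama:11434), and the prompt text and 60-second timeout are arbitrary
choices for illustration. Ollama's /api/generate accepts "model", "prompt",
and "stream"; with "stream": False it returns a single JSON object whose
"response" field holds the generated text.

import os

import requests

# Resolve the endpoint the same way the patched code does:
# OLLAMA_HOST wins; otherwise fall back to the compose service
# name this patch introduces as the default.
ollama_host = os.getenv("OLLAMA_HOST", "http://ollama:11434")

try:
    res = requests.post(
        f"{ollama_host}/api/generate",
        json={
            "model": os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b"),
            "prompt": "Reply with the single word: ok",  # arbitrary probe prompt
            "stream": False,  # one JSON object instead of a JSONL stream
        },
        timeout=60,  # arbitrary; small local models can be slow on first load
    )
    res.raise_for_status()
    print(res.json().get("response", "").strip())
except requests.RequestException as e:
    print(f"Ollama connection error: {e}")

If the script prints a model reply, the service name resolves and the port is
reachable; a connection error here usually means the Ollama container is not
on the same network or is published under a different service name, in which
case setting OLLAMA_HOST explicitly overrides the default.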