Increase Ollama timeout to 180s and add num_ctx

Author: Marcos
Date: 2026-03-22 16:51:21 -03:00
parent 2cc4ed0d18
commit 17dcb9d178
2 changed files with 9 additions and 6 deletions


@@ -24,12 +24,14 @@ def get_llm_response(prompt: str, provider: str, cfg: dict) -> str:
     elif provider == "ollama":
         ollama_host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
+        model = os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b")
         try:
             res = requests.post(f"{ollama_host}/api/generate", json={
-                "model": os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b"),
+                "model": model,
                 "prompt": prompt,
-                "stream": False
-            }, timeout=30)
+                "stream": False,
+                "options": {"num_ctx": 4096}
+            }, timeout=180)
             if res.status_code == 200:
                 return res.json().get("response", "")
             return f"Ollama error (status {res.status_code}): {res.text}"


@@ -337,20 +337,21 @@ def _call_ollama(model: str, prompt: str, system_prompt: str = None) -> str:
     payload = {
         "model": model,
         "prompt": prompt,
-        "stream": False
+        "stream": False,
+        "options": {"num_ctx": 4096}
     }
     if system_prompt:
         payload["system"] = system_prompt
     try:
-        res = requests.post(f"{endpoint}/api/generate", json=payload, timeout=120)
+        res = requests.post(f"{endpoint}/api/generate", json=payload, timeout=180)
         if res.status_code == 200:
             return res.json().get("response", "")
         return f"Ollama error: {res.status_code} - {res.text}"
     except requests.exceptions.Timeout:
-        return f"[TIMEOUT] Ollama did not respond within 120s. Check that the service is running at {endpoint}"
+        return f"[TIMEOUT] Ollama did not respond within 180s. Check that the model is loaded at {endpoint}"
     except requests.exceptions.ConnectionError:
         return f"[CONNECTION ERROR] Could not connect to Ollama at {endpoint}. Check that the Ollama container is on the same Docker network."
    except Exception as e:
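Since the updated timeout message asks whether the model is loaded, a cheap preflight against Ollama's `/api/tags` endpoint (which lists locally available models) can separate "model missing" from "generation is just slow" before committing to a 180s wait. A sketch only; `model_is_available` is a hypothetical helper, not part of this commit:

```python
import requests

def model_is_available(endpoint: str, model: str) -> bool:
    """Hypothetical preflight: list local models via GET /api/tags
    and look for `model` before a long /api/generate call."""
    try:
        res = requests.get(f"{endpoint}/api/tags", timeout=5)
        res.raise_for_status()
    except requests.exceptions.RequestException:
        return False  # service unreachable; generate would fail too
    names = [m.get("name", "") for m in res.json().get("models", [])]
    # Tag names carry a variant suffix (e.g. "qwen2.5-coder:1.5b"),
    # so accept both exact and prefix matches.
    return any(n == model or n.startswith(f"{model}:") for n in names)
```

If this returns False while the container is reachable, `ollama pull` the model first; the 180s timeout then mostly absorbs first-load latency rather than masking a missing model.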