Increase Ollama timeout to 180s and add num_ctx
@@ -24,12 +24,14 @@ def get_llm_response(prompt: str, provider: str, cfg: dict) -> str:
     elif provider == "ollama":
         ollama_host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
+        model = os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b")
         try:
             res = requests.post(f"{ollama_host}/api/generate", json={
-                "model": os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b"),
+                "model": model,
                 "prompt": prompt,
-                "stream": False
-            }, timeout=30)
+                "stream": False,
+                "options": {"num_ctx": 4096}
+            }, timeout=180)
             if res.status_code == 200:
                 return res.json().get("response", "")
             return f"Erro Ollama (Status {res.status_code}): {res.text}"