Change default Ollama model from qwen2.5-coder to llama3.2:1b for faster chat

This commit is contained in:
Marcos
2026-03-22 17:25:11 -03:00
parent 17dcb9d178
commit 8002262cf7
2 changed files with 6 additions and 6 deletions

View File

@@ -24,7 +24,7 @@ def get_llm_response(prompt: str, provider: str, cfg: dict) -> str:
elif provider == "ollama":
ollama_host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
-        model = os.getenv("OLLAMA_MODEL", "qwen2.5-coder:1.5b")
+        model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
try:
res = requests.post(f"{ollama_host}/api/generate", json={
"model": model,