🚀 Auto-deploy: BotVPS updated on 16/04/2026 17:46:09
@@ -15,7 +15,7 @@ async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
     elif provider == "ollama":
         model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
     else:
-        model = cfg.get("model") or "gemini-2.5-flash"
+        model = cfg.get("model") or "qwen/qwen-2.5-72b-instruct"

     return await call_llm(provider, model, prompt)

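For context, a minimal Python sketch of the function around this hunk. Only the lines shown in the diff are confirmed; the earlier provider branches and the body of call_llm are not visible, so the stub below and its behavior are assumptions inferred from the call site.

import asyncio
import os

async def call_llm(provider: str, model: str, prompt: str) -> str:
    # Stub standing in for the repo's real call_llm; only its call-site
    # signature (provider, model, prompt) is visible in the diff.
    return f"[{provider}/{model}] {prompt}"

async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
    # The hunk opens with an `elif`, so earlier provider branches exist
    # upstream; they are not shown in the diff and are omitted here.
    if provider == "ollama":
        model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
    else:
        # Default model changed by this commit (was "gemini-2.5-flash").
        model = cfg.get("model") or "qwen/qwen-2.5-72b-instruct"
    return await call_llm(provider, model, prompt)

# Example: asyncio.run(get_llm_response_async("ping", "ollama", {}))

The change only touches the fallback branch: an explicit cfg["model"] still wins, and the OLLAMA_MODEL environment variable still controls the ollama path.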