🚀 Auto-deploy: BotVPS atualizado em 16/04/2026 17:46:09

This commit is contained in:
2026-04-16 17:46:09 +00:00
parent 8446c27d3b
commit f8c2ffbc79
2 changed files with 3 additions and 3 deletions

View File

@@ -15,7 +15,7 @@ async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
elif provider == "ollama":
model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
else:
-        model = cfg.get("model") or "gemini-2.5-flash"
+        model = cfg.get("model") or "qwen/qwen-2.5-72b-instruct"
return await call_llm(provider, model, prompt)

View File

@@ -72,7 +72,7 @@ def set_planner(provider: str = None, model: str = None) -> dict:
}
save_config(cfg)
-    return cfg["orchestrator"].get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
+    return cfg["orchestrator"].get("planner", {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"})
def set_executor(provider: str = None, model: str = None) -> dict:
"""Define o provider e modelo do executor."""
@@ -363,7 +363,7 @@ def check_ollama_connection() -> dict:
def get_planner_llm() -> tuple:
cfg = get_orchestrator_config()
-    planner = cfg.get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
+    planner = cfg.get("planner", {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"})
return planner["provider"], planner["model"]
def get_executor_llm() -> tuple: