🚀 Auto-deploy: BotVPS updated on 16/04/2026 17:46:09
@@ -15,7 +15,7 @@ async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
     elif provider == "ollama":
         model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
     else:
-        model = cfg.get("model") or "gemini-2.5-flash"
+        model = cfg.get("model") or "qwen/qwen-2.5-72b-instruct"
 
     return await call_llm(provider, model, prompt)
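For context, the else branch is the catch-all for providers not matched earlier in the chain: when cfg carries no explicit model, the default now falls through to qwen/qwen-2.5-72b-instruct instead of gemini-2.5-flash. A minimal usage sketch, assuming the module is importable, that "openrouter" reaches the else branch, and that an empty cfg dict is acceptable input (all assumptions, not part of the commit):

    import asyncio

    async def main():
        # A falsy cfg["model"] falls through to the new qwen default in the else branch.
        reply = await get_llm_response_async(
            prompt="ping",
            provider="openrouter",  # assumed to reach the else branch shown above
            cfg={},                 # no explicit model configured
        )
        print(reply)

    asyncio.run(main())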
@@ -72,7 +72,7 @@ def set_planner(provider: str = None, model: str = None) -> dict:
     }
     save_config(cfg)
 
-    return cfg["orchestrator"].get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
+    return cfg["orchestrator"].get("planner", {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"})
 
 
 def set_executor(provider: str = None, model: str = None) -> dict:
     """Sets the executor provider and model."""
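The changed return line only matters when the orchestrator config has no planner entry yet: dict.get supplies the new openrouter/qwen pair as the fallback. A small sketch of just that fallback, with the cfg literal standing in for whatever save_config actually persists (an assumption):

    cfg = {"orchestrator": {}}  # no planner saved yet

    # Same .get() fallback as the return line above.
    planner = cfg["orchestrator"].get(
        "planner",
        {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"},
    )
    print(planner["provider"], planner["model"])
    # -> openrouter qwen/qwen-2.5-72b-instruct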
@@ -363,7 +363,7 @@ def check_ollama_connection() -> dict:
 
 
 def get_planner_llm() -> tuple:
     cfg = get_orchestrator_config()
-    planner = cfg.get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
+    planner = cfg.get("planner", {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"})
     return planner["provider"], planner["model"]
 
 
 def get_executor_llm() -> tuple:
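Since get_planner_llm returns a (provider, model) tuple, callers unpack both values in one step. The sketch below assumes the function is in scope and that no planner key is stored, so the new default is what comes back:

    provider, model = get_planner_llm()
    print(f"planner -> provider={provider}, model={model}")
    # With no stored planner config:
    # planner -> provider=openrouter, model=qwen/qwen-2.5-72b-instruct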