🚀 Auto-deploy: BotVPS updated on 29/04/2026 00:17:12

2026-04-29 00:17:12 +00:00
parent b3b2fd9157
commit b8a23f6e18

@@ -34,8 +34,8 @@ LLM_PROVIDERS = {
     "openrouter": {
         "name": "OpenRouter",
         "type": "api",
-        "models": ["inclusionai/ling-2.6-flash:free", "google/gemini-2.0-flash-001", "deepseek/deepseek-chat"],
-        "default": "inclusionai/ling-2.6-flash:free",
+        "models": ["qwen/qwen-2.5-72b-instruct", "inclusionai/ling-2.6-flash:free", "google/gemini-2.0-flash-001"],
+        "default": "qwen/qwen-2.5-72b-instruct",
         "endpoint": "https://openrouter.ai/api/v1"
     },
     "ollama": {
@@ -55,7 +55,7 @@ def get_orchestrator_config() -> dict:
     """Returns the orchestrator config."""
     cfg = get_config()
     return cfg.get("orchestrator", {
-        "planner": {"provider": "openrouter", "model": "inclusionai/ling-2.6-flash:free"},
+        "planner": {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"},
         "executor": {"provider": "ollama", "model": "llama3.2:1b"}
     })
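
For reference (not part of the change): how the planner/executor defaults above read back out of get_orchestrator_config(); get_planner_llm and get_executor_llm elsewhere in this file presumably do the equivalent.

# Illustration only (not part of this commit): reading the defaults back,
# assuming this config module is imported.
orch = get_orchestrator_config()
planner = orch["planner"]    # {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"}
executor = orch["executor"]  # {"provider": "ollama", "model": "llama3.2:1b"}
print(planner["provider"], planner["model"])    # openrouter qwen/qwen-2.5-72b-instruct
print(executor["provider"], executor["model"])  # ollama llama3.2:1b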
@@ -382,7 +382,19 @@ def get_executor_llm() -> tuple:
 async def call_planner_async(prompt: str, system_prompt: str = None) -> str:
     provider, model = get_planner_llm()
-    return await call_llm(provider, model, prompt, system_prompt)
+    try:
+        response = await call_llm(provider, model, prompt, system_prompt)
+        # If the response signals an API error, trigger the fallback
+        if response.startswith("Erro OpenRouter"):
+            raise Exception(response)
+        return response
+    except Exception as e:
+        # FALLBACK logic: if Qwen fails, retry with Ling-2.6-flash
+        if provider == "openrouter" and model == "qwen/qwen-2.5-72b-instruct":
+            backup_model = "inclusionai/ling-2.6-flash:free"
+            print(f"⚠️ [FALLBACK] Falha no Qwen ({str(e)}). Tentando {backup_model}...")
+            return await call_llm("openrouter", backup_model, prompt, system_prompt)
+        return f"Erro Crítico no Planner: {str(e)}"
 async def call_executor_async(prompt: str, system_prompt: str = None) -> str:
     provider, model = get_executor_llm()
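
Not part of the diff: a short sketch of driving the new fallback path from synchronous code. The prompt text and entry point are illustrative, not from the repo.

# Illustration only (not part of this commit): exercising call_planner_async.
# If the primary Qwen call raises, or call_llm returns an "Erro OpenRouter"
# string, the planner retries once with inclusionai/ling-2.6-flash:free.
import asyncio

async def main():
    answer = await call_planner_async(
        "Summarize the last deployment.",             # illustrative prompt
        system_prompt="You are the BotVPS planner.",  # illustrative system prompt
    )
    print(answer)

asyncio.run(main())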