🚀 Auto-deploy: BotVPS atualizado em 24/03/2026 10:42:30

This commit is contained in:
2026-03-24 10:42:30 +00:00
parent bd4ce084a1
commit bdd1cedbf7
2 changed files with 69 additions and 2 deletions

70
main.py
View File

@@ -18,6 +18,7 @@ from orchestrator import (
get_llm_config, set_llm_config, format_confirmation_message,
format_completion_message
)
from llm_providers import get_available_models
load_dotenv()
async def get_system_status(is_auth: bool = Depends(verify_password)):
    """Return a snapshot of host resource usage for the dashboard.

    Returns a dict with:
      - "cpu": instantaneous CPU utilisation percent (psutil.cpu_percent)
      - "ram": percent used, plus used/total expressed in GiB (2 decimals)
      - "disk": percent used of the root filesystem

    Fix: the previous dict literal carried a duplicate "ram" key (a stale
    one-line entry followed by the expanded one); the first was silently
    shadowed. Only the expanded entry is kept.
    """
    vm = psutil.virtual_memory()
    return {
        "cpu": psutil.cpu_percent(),
        "ram": {
            "percent": vm.percent,
            "used": round(vm.used / (1024**3), 2),    # GiB
            "total": round(vm.total / (1024**3), 2),  # GiB
        },
        "disk": {"percent": psutil.disk_usage('/').percent},
    }
# --- CONFIGURAÇÃO GERAL ---
@app.get("/api/config")
async def read_config(is_auth: bool = Depends(verify_password)):
    """Return the bot's current general configuration."""
    current_cfg = get_config()
    return current_cfg
@app.post("/api/config")
async def update_config(cfg: dict, is_auth: bool = Depends(verify_password)):
    """Persist a new general configuration and acknowledge the caller."""
    save_config(cfg)
    ack = {"status": "success"}
    return ack
# --- CONFIGURAÇÃO LLM (ORQUESTRADOR) ---
@app.get("/api/llm-config")
async def read_llm_config(is_auth: bool = Depends(verify_password)):
    """Expose the orchestrator's current planner/executor LLM settings."""
    llm_cfg = get_llm_config()
    return llm_cfg
@app.post("/api/llm-config")
async def update_llm_config(cfg: dict, is_auth: bool = Depends(verify_password)):
    """Update the planner/executor LLM provider and model selections.

    Missing keys in the payload are passed through as None, exactly as
    ``cfg.get`` would yield them.
    """
    expected_keys = (
        "planner_provider",
        "planner_model",
        "executor_provider",
        "executor_model",
    )
    set_llm_config(**{key: cfg.get(key) for key in expected_keys})
    return {"status": "success"}
@app.get("/api/llm-models")
async def list_models(is_auth: bool = Depends(verify_password)):
    """List every LLM model known to the provider registry."""
    available = get_available_models()
    return {"models": available}
# --- SYNC & ACTIONS ---
@app.post("/api/sync-credentials")
async def sync_creds(is_auth: bool = Depends(verify_password)):
    """Run a credential sync and return its result payload.

    The import is deliberately function-local, matching the original's
    lazy-load of ``credential_manager``.
    """
    from credential_manager import sync_credentials
    outcome = sync_credentials()
    return outcome
@app.post("/api/sync-from-repo")
async def sync_from_repo(is_auth: bool = Depends(verify_password)):
    """Force-refresh local state from the Gitea repository, then acknowledge."""
    await fetch_from_gitea_repo_async(force=True)
    ack = {"status": "synced"}
    return ack
@app.post("/api/action")
async def run_action(data: dict, is_auth: bool = Depends(verify_password)):
    """Dispatch a maintenance action requested from the web panel.

    Supported ``data["type"]`` values:
      - "ping":        health check, returns a pong message.
      - "restart_bot": kills the bot process; the docker restart policy is
                       expected to bring it back up.
      - "reboot_vps":  deliberately blocked from the web UI.
    Any other type yields an error payload.

    Fix: replaced ``os.system("pkill ...")`` with ``subprocess.run`` on an
    argument list — no shell is spawned for this fixed command, which is
    both safer and the idiomatic form.
    """
    action_type = data.get("type")
    if action_type == "ping":
        return {"status": "success", "message": "Pong! Servidor respondendo."}
    if action_type == "restart_bot":
        # Simulate a restart by killing the process; docker restart handles
        # the rest. check=False: a non-zero exit (no matching process) is
        # acceptable, matching the old os.system best-effort behaviour.
        import subprocess
        subprocess.run(["pkill", "-9", "-f", "bot_logic.py"], check=False)
        return {"status": "success", "message": "Bot reiniciado."}
    if action_type == "reboot_vps":
        # Rebooting the VPS from the web is intentionally refused.
        return {"status": "error", "message": "Reboot bloqueado via Web por segurança."}
    return {"status": "error", "message": f"Ação {action_type} desconhecida."}
@app.get("/api/test_llm")
async def test_llm_latency(is_auth: bool = Depends(verify_password)):
    """Round-trip a trivial prompt through the LLM agent and report latency.

    On failure, returns an error payload with the exception text instead of
    raising.
    """
    started = time.time()
    try:
        reply = await query_agent_async("responda apenas 'pong'")
    except Exception as e:
        return {"status": "error", "message": str(e)}
    elapsed = round(time.time() - started, 2)
    return {"status": "success", "latency": elapsed, "reply": reply}
# --- CHAT & ORCHESTRATION ---
@app.post("/api/chat")
async def web_chat(message: dict, is_auth: bool = Depends(verify_password)):