🚀 Initial deploy to Gitea with fixes and dashboard enhancements

This commit is contained in:
2026-03-21 19:16:10 +00:00
commit 5e8acefa9a
10 changed files with 1330 additions and 0 deletions

126
main.py Normal file
View File

@@ -0,0 +1,126 @@
import os
import psutil
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from dotenv import load_dotenv
from ai_agent import query_agent
# Load environment variables from the local .env file.
load_dotenv()
app = FastAPI(title="VpsTelegramBot API")
# Configure the HTML template engine.
# NOTE: the 'templates' directory must exist and contain index.html.
templates = Jinja2Templates(directory="templates")
@app.get("/favicon.ico", include_in_schema=False)
async def favicon():
    """Answer favicon requests with a small JSON body instead of a 404."""
    payload = {"status": "ok"}
    return JSONResponse(content=payload)
@app.get("/", response_class=HTMLResponse)
async def read_root(request: Request):
    """Render the web dashboard from templates/index.html."""
    # Jinja2Templates requires the request object in the template context.
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)
from starlette.concurrency import run_in_threadpool
@app.get("/api/status")
async def get_system_status():
    """Return system status (CPU, RAM, disk) without blocking the event loop."""
    def _collect() -> dict:
        # These psutil calls block briefly (cpu_percent samples for 0.1 s),
        # which is why the whole collection runs in a worker thread below.
        gib = 1024 ** 3
        memory = psutil.virtual_memory()
        usage = psutil.disk_usage('/')
        return {
            "cpu": psutil.cpu_percent(interval=0.1),
            "ram": {
                "total": round(memory.total / gib, 2),
                "used": round(memory.used / gib, 2),
                "percent": memory.percent,
            },
            "disk": {
                "total": round(usage.total / gib, 2),
                "used": round(usage.used / gib, 2),
                "percent": usage.percent,
            },
        }
    stats = await run_in_threadpool(_collect)
    return JSONResponse(content=stats)
import subprocess
from config import get_config, save_config
@app.get("/api/config")
async def read_configuration():
    """Expose the current bot configuration as JSON."""
    current = get_config()
    return JSONResponse(content=current)
@app.post("/api/config")
async def update_configuration(req: dict):
    """Persist the configuration payload sent by the client."""
    # save_config handles the actual write; we only acknowledge it here.
    save_config(req)
    ack = {"status": "success"}
    return JSONResponse(content=ack)
@app.post("/api/action")
async def execute_smart_action(action: dict):
    """Run predefined server actions (Smart Actions from the Web UI)."""
    kind = action.get("type")
    if kind == "ping":
        # No shell work needed: just prove the API loop is responsive.
        return JSONResponse(content={"status": "success", "message": "Pong! Servidor online e responsivo."})
    # Each destructive action maps to a fire-and-forget shell command plus
    # the acknowledgement message shown in the UI.
    shell_actions = {
        # Short delay, then restart this agent's own container via the host docker.
        "restart_bot": (
            "sleep 1 && docker restart vps-ai-agent",
            "Reboot do Agente autorizado. Estará de volta em instantes!",
        ),
        # Deep clean: prune stopped containers and dangling volumes.
        "clear_cache": (
            "docker system prune -af --volumes",
            "Limpando caches obsoletos em background! Verifique o gráfico de disco em instantes.",
        ),
        # Throwaway privileged container enters the host's namespaces
        # (nsenter into PID 1) to issue a physical reboot of the VPS.
        "reboot_vps": (
            "sleep 2 && docker run --rm --privileged --pid=host alpine nsenter -t 1 -m -u -n -i reboot",
            "🚨 O REBOOT CRÍTICO COMEÇOU. A VPS inteira desligará e religará agora.",
        ),
    }
    if kind in shell_actions:
        command, message = shell_actions[kind]
        # Commands are hard-coded above (never built from user input), so
        # shell=True carries no injection risk here.
        subprocess.Popen(command, shell=True)
        return JSONResponse(content={"status": "success", "message": message})
    return JSONResponse(content={"status": "error", "message": "Ação desconhecida."}, status_code=400)
@app.post("/api/chat")
async def web_chat(message: dict):
    """Handle a chat message from the Web UI by delegating to the AI agent."""
    text = message.get("text", "")
    if not text:
        return JSONResponse(content={"reply": "Por favor, digite um comando válido."})
    # Offload the (blocking) agent call to a thread so the status API stays responsive.
    answer = await run_in_threadpool(query_agent, prompt=text)
    return JSONResponse(content={"reply": answer})
import time
@app.get("/api/test_llm")
async def test_llm_speed():
    """Measure the active LLM's response latency.

    Returns a JSON payload with the round-trip latency in seconds and the
    model's reply, or a 500 JSON payload with the error message on failure.
    """
    # perf_counter() is monotonic, so the measured interval cannot be skewed
    # (or go negative) when NTP or the admin adjusts the system clock —
    # unlike the wall-clock time.time() used previously.
    start = time.perf_counter()
    try:
        reply = await run_in_threadpool(query_agent, prompt="responda apenas com a palavra 'pong'")
        latency = round(time.perf_counter() - start, 2)
        return JSONResponse(content={"status": "success", "latency": latency, "reply": reply})
    except Exception as e:
        # Broad catch is deliberate: this is a diagnostic boundary endpoint,
        # and any backend failure should surface as a 500 JSON payload.
        return JSONResponse(content={"status": "error", "message": str(e)}, status_code=500)
@app.post("/webhook")
async def telegram_webhook(request: Request):
    """Receive update payloads (messages) pushed by Telegram."""
    payload = await request.json()
    # bot_logic.py will consume these updates in the future; for now just print.
    print("Update recebido do Telegram:", payload)
    return {"ok": True}
if __name__ == "__main__":
    import uvicorn
    # Run the dev server on port 8000, reachable from any network interface.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)