refatoração

This commit is contained in:
2026-03-23 23:38:56 +00:00
parent 8002262cf7
commit b7e6239216
16 changed files with 2290 additions and 4321 deletions

View File

@@ -1,323 +1,99 @@
import os
import requests
import asyncio
from dotenv import load_dotenv
import re
from telegram import Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
from orchestrator import (
handle_message_async, orchestrate_async, format_confirmation_message,
format_completion_message, execute_step_async
)
from ai_agent import query_agent
import speech_recognition as sr
from pydub import AudioSegment
from gtts import gTTS
from orchestrator import handle_message, orchestrate, format_confirmation_message, format_completion_message
# Load environment variables from .env and read bot configuration.
load_dotenv()
TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")  # Telegram bot API token
ALLOWED_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")  # only this chat may command the bot
ELEVENLABS_KEY = os.getenv("ELEVENLABS_API_KEY")  # NOTE(review): not used in this chunk (gTTS is used instead) — confirm
VOICE_ID = os.getenv("ELEVENLABS_VOICE_ID")  # NOTE(review): not used in this chunk — confirm
def synthesize_audio(text: str) -> str:
    """Generate a local/free TTS audio file with gTTS and return its path.

    Args:
        text: Reply text to speak; emojis and markdown markers are stripped
            because they disturb the synthesized speech.

    Returns:
        Path of the generated MP3 file, or "" when synthesis failed.
    """
    try:
        # Strip emojis first, then markdown markers. The merged-diff residue
        # applied re.sub to the raw ``text``, silently discarding the emoji
        # cleanup done just before; chain the cleanups instead.
        texto_limpo = text.replace("🤖", "").replace("🧑‍🏫", "").replace("*", "").replace("`", "")
        texto_limpo = re.sub(r'[*`#]', '', texto_limpo)
        filepath = "/tmp/reply_audio.mp3"
        # Build gTTS once (the residue built it twice, wasting a synthesis);
        # cap at 500 chars to keep synthesis fast.
        tts = gTTS(text=texto_limpo[:500], lang='pt-br', tld='com.br', slow=False)
        tts.save(filepath)
        return filepath
    except Exception as e:
        print(f"TTS Error: {e}")
        return ""
async def auth_check(update: Update) -> bool:
    """Return True only when the sender is the authorized chat.

    The merged-diff residue ran two denial checks back to back, sending two
    "access denied" messages and hitting ``update.message`` before the
    None-check; keep the single None-safe check.

    Args:
        update: Incoming Telegram update (``update.message`` may be None).

    Returns:
        True when the chat id matches ALLOWED_CHAT_ID, False otherwise.
    """
    if not update.message or str(update.message.chat_id) != ALLOWED_CHAT_ID:
        if update.message:
            await update.message.reply_text("Acesso negado.")
        return False
    return True
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Reply to /start with a greeting once the sender is authorized."""
    authorized = await auth_check(update)
    if not authorized:
        return
    greeting = "👋 Olá, Marcos! Antigravity VPS Agent online e pronto para receber comandos."
    await update.message.reply_text(greeting)
# In-RAM conversation memory, keyed by chat_id; each entry keeps the last
# 10 {"user": ..., "bot": ...} turns (trimmed with history[-10:] by callers).
chat_histories = {}
async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Route an incoming text message.

    The original span was merged-diff residue: the old synchronous
    orchestrator path and the new async one were interleaved, ``reply`` was
    computed and then clobbered, and the AI-agent fallback ran
    unconditionally before ``is_task`` was even tested. This is the coherent
    refactored flow, with plan cancellation preserved.

    Dispatch order:
      1. Direct orchestrator commands (/status, /tools, /sync).
      2. Confirmation ("sim") or cancellation ("nao") of a pending plan.
      3. Orchestrator task (keyword match or /orchestrate).
      4. Fallback to the conversational AI agent.
    """
    if not await auth_check(update):
        return
    user_msg = update.message.text
    await update.message.reply_chat_action(action="typing")

    # 1. Direct commands handled by the orchestrator.
    if user_msg.startswith('/') and user_msg.split()[0] in ['/status', '/tools', '/sync']:
        reply = await handle_message_async(user_msg)
        await update.message.reply_text(reply)
        return

    # 2a. Confirmation of a pending plan saved in bot_data.
    if user_msg.lower() in ['sim', 's', 'confirmar'] and 'pending_plan' in context.bot_data:
        plan = context.bot_data.pop('pending_plan')
        results = []
        for step in plan.get("steps", []):
            res = await execute_step_async(step)
            results.append(res)
            # Abort the plan when a dangerous step fails.
            if not res["success"] and step.get("danger") == "dangerous":
                break
        reply = format_completion_message({"plan": plan, "results": results})
        await update.message.reply_text(reply)
        return

    # 2b. Cancellation of a pending plan.
    if user_msg.lower() in ['nao', 'não', 'cancelar', 'cancela', 'n']:
        if context.bot_data.pop('pending_plan', None) is not None:
            await update.message.reply_text("Operacao cancelada.")
        else:
            await update.message.reply_text("Nada pendente para cancelar.")
        return

    # 3. Orchestrator task detection.
    orchestrator_keywords = ['deploy', 'restart', 'git', 'docker', 'atualiza', 'status']
    is_task = any(kw in user_msg.lower() for kw in orchestrator_keywords) or user_msg.startswith('/orchestrate')

    if is_task:
        task = user_msg.replace('/orchestrate', '').strip()
        result = await orchestrate_async(task)
        if result["status"] == "needs_confirmation":
            # Persist the plan so a follow-up "sim" can execute it.
            context.bot_data['pending_plan'] = result["plan"]
            reply = format_confirmation_message(result)
        else:
            reply = format_completion_message(result)
        await update.message.reply_text(reply)
    else:
        # 4. Fallback: conversational AI agent with the active provider.
        from config import get_config
        cfg = get_config()
        reply = query_agent(user_msg, override_provider=cfg.get("active_provider"))
        await update.message.reply_text(reply)
async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Transcribe a Telegram voice note, query the agent, and reply with audio.

    Pipeline: download OGG -> convert to WAV (requires ffmpeg) ->
    Google speech recognition (pt-BR) -> AI agent -> gTTS voice reply,
    falling back to text when synthesis fails.
    """
    if not await auth_check(update):
        return
    await update.message.reply_chat_action(action="record_voice")
    # Download the Telegram voice note to disk.
    voice_file = await update.message.voice.get_file()
    ogg_path = "/tmp/voice.ogg"
    wav_path = "/tmp/voice.wav"
    await voice_file.download_to_drive(ogg_path)
    # Convert to WAV (requires ffmpeg installed on the machine).
    try:
        audio = AudioSegment.from_ogg(ogg_path)
        audio.export(wav_path, format="wav")
    except Exception as e:
        await update.message.reply_text(f"Erro ao processar áudio (O ffmpeg está instalado na VPS?): {e}")
        return
    # Native SpeechRecognition for transcription (Whisper/Ollama could come later).
    recognizer = sr.Recognizer()
    with sr.AudioFile(wav_path) as source:
        audio_data = recognizer.record(source)
    try:
        text = recognizer.recognize_google(audio_data, language="pt-BR")
        await update.message.reply_text(f"🗣️ Reconhecido: _{text}_", parse_mode="Markdown")
        # Prior conversation history for this chat.
        chat_id = update.message.chat_id
        history = chat_histories.get(chat_id, [])
        # Send the recognized text to the agent, honoring the active provider.
        from config import get_config
        cfg = get_config()
        reply = query_agent(prompt=text, override_provider=cfg.get("active_provider"), chat_history=history)
        # Update the RAM-only history, keeping the last 10 turns.
        history.append({"user": text, "bot": reply})
        chat_histories[chat_id] = history[-10:]
        # Synthesize with gTTS (not ElevenLabs, despite the old comment) and
        # reply with audio; the original leaked the file handle here.
        audio_path = synthesize_audio(reply)
        if audio_path:
            with open(audio_path, 'rb') as voice_fp:
                await update.message.reply_voice(voice=voice_fp)
        else:
            await update.message.reply_text(reply)
    except sr.UnknownValueError:
        await update.message.reply_text("Não consegui entender o que foi dito no áudio.")
    except sr.RequestError as e:
        await update.message.reply_text(f"Erro no serviço de STT: {e}")
async def llm_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """/llm — show or switch the active LLM provider (gemini | ollama).

    Without arguments, reports the current provider (plus an Ollama health
    probe when Ollama is active); with an argument, switches and persists
    the choice via save_config.
    """
    if not await auth_check(update):
        return
    args = context.args
    from config import get_config, save_config
    cfg = get_config()
    current = cfg.get('active_provider', 'ollama').upper()
    if not args:
        # No arguments: report the current status.
        ollama_status = ""
        if current == "OLLAMA":
            try:
                from llm_providers import check_ollama_connection
                status = check_ollama_connection()
                if status.get("status") == "ok":
                    models = status.get("models", [])
                    ollama_status = f"\n\n🔷 Ollama: ✅ Online\n Modelos: {', '.join(models[:3]) if models else 'Nenhum'}"
                elif status.get("status") == "timeout":
                    ollama_status = "\n\n🔷 Ollama: ⏱️ Timeout - não respondeu"
                else:
                    ollama_status = f"\n\n🔷 Ollama: ❌ {status.get('status', 'Erro desconhecido')}"
            except Exception:
                # Best-effort probe; the bound exception was never used.
                ollama_status = "\n\n🔷 Ollama: ❌ Erro ao verificar"
        await update.message.reply_text(
            f"🤖 LLM Atual: *{current}*{ollama_status}\n\n"
            f"Para mudar: /llm gemini ou /llm ollama"
        )
        return
    new_model = args[0].lower()
    if new_model in ["gemini", "ollama"]:
        cfg["active_provider"] = new_model
        save_config(cfg)
        await update.message.reply_text(f"✅ LLM alterado para: *{new_model.upper()}*")
    else:
        await update.message.reply_text(
            f"❌ Modelo inválido: {new_model}\n\n"
            f"Disponíveis: gemini, ollama\n"
            f"LLM Atual: *{current}*"
        )
async def clear_history(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """/limpar — wipe the in-RAM conversation history for this chat."""
    if not await auth_check(update):
        return
    chat_id = update.message.chat_id
    if chat_id in chat_histories:
        chat_histories[chat_id] = []
        await update.message.reply_text("🧹 Memória limpa com sucesso!")
    else:
        await update.message.reply_text("A memória já está vazia.")
    # NOTE(review): two stray replies ("Processando aúdio...", a voice-command
    # placeholder) were merged-diff residue from handle_voice and fired on
    # every /limpar call — removed.
def get_telegram_app():
    """Build the Telegram Application with all command/message handlers.

    Returns:
        A configured telegram.ext.Application (not yet running).

    Raises:
        ValueError: when TELEGRAM_BOT_TOKEN is missing from the environment.
    """
    if not TOKEN:
        # Fail fast: Application.builder() cannot work with a None token.
        # (Merged-diff residue left an unreachable print() after this raise.)
        raise ValueError("TELEGRAM_BOT_TOKEN não encontrado no .env")
    app = Application.builder().token(TOKEN).build()
    app.add_handler(CommandHandler("start", start))
    app.add_handler(CommandHandler("llm", llm_command))
    app.add_handler(CommandHandler("limpar", clear_history))
    # Aliases routed through handle_text so orchestrator commands work.
    app.add_handler(CommandHandler("status", lambda u, c: handle_text(u, c)))
    app.add_handler(CommandHandler("tools", lambda u, c: handle_text(u, c)))
    app.add_handler(CommandHandler("sync", lambda u, c: handle_text(u, c)))
    app.add_handler(CommandHandler("orchestrate", lambda u, c: handle_text(u, c)))
    # A second "start" registration (diff residue) was removed: only the
    # first handler registered for a command ever runs.
    app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_text))
    app.add_handler(MessageHandler(filters.COMMAND, handle_text))
    app.add_handler(MessageHandler(filters.VOICE, handle_voice))
    return app
# Quick standalone test entry point (production presumably embeds the app
# elsewhere via get_telegram_app — confirm against the deploy setup).
if __name__ == "__main__":
    print("--- INICIANDO BOT TELEGRAM (POLLING) ---")
    print("Limpando Webhooks e mensagens pendentes para evitar CONFLITOS...")
    app = get_telegram_app()
    # drop_pending_updates=True clears the queue and disables webhooks automatically.
    print("Bot iniciando...")
    app.run_polling(drop_pending_updates=True)