This commit is contained in:
Marcos
2026-03-22 10:10:27 -03:00
parent f72677ef27
commit 6589c62b18
4 changed files with 327 additions and 153 deletions

View File

@@ -41,17 +41,28 @@ async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not await auth_check(update): return
await update.message.reply_text("👋 Olá, Marcos! Antigravity VPS Agent online e pronto para receber comandos.")
# Persistent per-chat conversation memory, held in process RAM only
# (maps chat_id -> list of {"user": ..., "bot": ...} turns; lost on restart).
chat_histories = {}
async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not await auth_check(update): return
chat_id = update.message.chat_id
user_msg = update.message.text
await update.message.reply_chat_action(action="typing")
# Busca histórico anterior
history = chat_histories.get(chat_id, [])
# Aciona o Agente de IA para processar o prompt e executar Tools se precisar
from config import get_config
cfg = get_config()
reply = query_agent(prompt=user_msg, override_provider=cfg.get("active_provider"))
reply = query_agent(prompt=user_msg, override_provider=cfg.get("active_provider"), chat_history=history)
# Atualiza histórico
history.append({"user": user_msg, "bot": reply})
chat_histories[chat_id] = history[-10:] # Mantém apenas as últimas 10
# Se o usuário pedir ativamente por áudio no texto
if "áudio" in user_msg.lower() or "audio" in user_msg.lower() or "voz" in user_msg.lower():
await update.message.reply_chat_action(action="record_voice")
@@ -59,8 +70,6 @@ async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
if audio_path:
await update.message.reply_voice(voice=open(audio_path, 'rb'))
return
else:
reply += "\n\n*(Falha ao gerar áudio com a ElevenLabs. Serviço indisponível.)*"
# Responde no chat normalmente
await update.message.reply_text(reply)
@@ -93,10 +102,18 @@ async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
text = recognizer.recognize_google(audio_data, language="pt-BR")
await update.message.reply_text(f"🗣️ Reconhecido: _{text}_", parse_mode="Markdown")
# Busca histórico anterior
chat_id = update.message.chat_id
history = chat_histories.get(chat_id, [])
# Envia o texto reconhecido para o Agente (respeitando a configuração ativa)
from config import get_config
cfg = get_config()
reply = query_agent(prompt=text, override_provider=cfg.get("active_provider"))
reply = query_agent(prompt=text, override_provider=cfg.get("active_provider"), chat_history=history)
# Atualiza histórico
history.append({"user": text, "bot": reply})
chat_histories[chat_id] = history[-10:]
# Sintetiza com ElevenLabs e responde com Áudio
audio_path = synthesize_audio(reply)
@@ -129,12 +146,22 @@ async def llm_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
else:
await update.message.reply_text("Modelos disponíveis: gemini ou ollama.")
async def clear_history(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle the /limpar command: wipe this chat's conversation memory.

    Fix over the previous version: instead of leaving an empty list behind
    (``chat_histories[chat_id] = []``), the chat's entry is removed entirely
    with ``dict.pop``. This stops the module-level dict from accumulating
    dead keys and makes a repeated /limpar correctly report that the memory
    is already empty. Readers elsewhere use ``chat_histories.get(chat_id, [])``,
    so removing the key is equivalent to storing an empty list.

    Args:
        update: Incoming Telegram update carrying the message/chat.
        context: Handler context supplied by python-telegram-bot (unused).
    """
    if not await auth_check(update):
        return
    chat_id = update.message.chat_id
    # pop() both removes and returns the stored history; None (or an already
    # empty list) means there was nothing meaningful to clear.
    if chat_histories.pop(chat_id, None):
        await update.message.reply_text("🧹 Memória limpa com sucesso!")
    else:
        await update.message.reply_text("A memória já está vazia.")
def get_telegram_app():
    """Construct the Telegram ``Application`` and wire up every handler.

    Raises:
        ValueError: when ``TELEGRAM_BOT_TOKEN`` is missing from the environment.

    Returns:
        The fully configured ``Application`` instance, ready for polling.
    """
    if not TOKEN:
        raise ValueError("TELEGRAM_BOT_TOKEN não encontrado no .env")

    application = Application.builder().token(TOKEN).build()

    # Declare the routing table in one place, then register it in order.
    routes = [
        CommandHandler("start", start),
        CommandHandler("llm", llm_command),
        CommandHandler("limpar", clear_history),
        MessageHandler(filters.TEXT & ~filters.COMMAND, handle_text),
        MessageHandler(filters.VOICE, handle_voice),
    ]
    for route in routes:
        application.add_handler(route)

    return application