From 22af78e6a5f5c11df8985c29d119930ce7f5c14a Mon Sep 17 00:00:00 2001
From: Marcos
Date: Sun, 22 Mar 2026 16:24:06 -0300
Subject: [PATCH] Fix /llm command to show status when no args

---
 bot_logic.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/bot_logic.py b/bot_logic.py
index 0388d68..662728d 100644
--- a/bot_logic.py
+++ b/bot_logic.py
@@ -252,19 +252,28 @@ async def llm_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
     args = context.args
     from config import get_config, save_config
 
+    cfg = get_config()
+    current = cfg.get('active_provider', 'ollama').upper()
+
     if not args:
-        cfg = get_config()
-        await update.message.reply_text(f"Comando incompleto. Use: /llm gemini ou /llm ollama.\n*Status Atual:* {cfg.get('active_provider').upper()}")
+        # Sem argumentos: mostra status atual
+        await update.message.reply_text(
+            f"đŸ€– LLM Atual: *{current}*\n\n"
+            f"Para mudar: /llm gemini ou /llm ollama"
+        )
         return
 
     new_model = args[0].lower()
     if new_model in ["gemini", "ollama"]:
-        cfg = get_config()
         cfg["active_provider"] = new_model
         save_config(cfg)
-        await update.message.reply_text(f"✅ InteligĂȘncia Artificial comutada com sucesso para: *{new_model.upper()}*")
+        await update.message.reply_text(f"✅ LLM alterado para: *{new_model.upper()}*")
     else:
-        await update.message.reply_text("Modelos disponĂ­veis: gemini ou ollama.")
+        await update.message.reply_text(
+            f"❌ Modelo invĂĄlido: {new_model}\n\n"
+            f"DisponĂ­veis: gemini, ollama\n"
+            f"LLM Atual: *{current}*"
+        )
 
 async def clear_history(update: Update, context: ContextTypes.DEFAULT_TYPE):
     if not await auth_check(update): return
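
Note (commentary, not part of the patch): the handler signature and context.args
usage indicate python-telegram-bot v20+. Below is a minimal wiring sketch showing
how llm_command might be registered; the Application setup, Defaults usage, and
token placeholder are assumptions for illustration, not taken from this repository.

    # Hypothetical registration sketch for llm_command.
    # context.args holds whatever follows the command, split on whitespace:
    # "/llm gemini" -> ["gemini"], a bare "/llm" -> [] (the new status path).
    from telegram.constants import ParseMode
    from telegram.ext import Application, CommandHandler, Defaults

    from bot_logic import llm_command

    # The *bold* markers in the replies only render if a parse mode is set;
    # assuming the project configures it globally via Defaults.
    defaults = Defaults(parse_mode=ParseMode.MARKDOWN)
    app = (
        Application.builder()
        .token("YOUR_BOT_TOKEN")  # placeholder, not a real token
        .defaults(defaults)
        .build()
    )
    app.add_handler(CommandHandler("llm", llm_command))
    app.run_polling()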