From 11e41e44be33afd5c8a19ed6b8513574a7934926 Mon Sep 17 00:00:00 2001
From: admtracksteel
Date: Tue, 24 Mar 2026 11:24:05 +0000
Subject: [PATCH] =?UTF-8?q?=F0=9F=9A=80=20Auto-deploy:=20BotVPS=20atualiza?=
 =?UTF-8?q?do=20em=2024/03/2026=2011:24:05?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ai_agent.py | 31 +++++++------------------------
 1 file changed, 7 insertions(+), 24 deletions(-)

diff --git a/ai_agent.py b/ai_agent.py
index 7c05e2a..09b7a78 100644
--- a/ai_agent.py
+++ b/ai_agent.py
@@ -4,34 +4,17 @@ import httpx
 import asyncio
 import json
 from tools import AVAILABLE_TOOLS
+from llm_providers import call_llm, get_available_models, get_planner_llm
 from config import get_config
 
 async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
-    """Invoca o provedor de LLM configurado (async)."""
-    async with httpx.AsyncClient(timeout=60) as client:
-        if provider == "gemini":
-            api_key = cfg.get("gemini_api_key") or os.getenv("GEMINI_API_KEY")
-            url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"
-            payload = {"contents": [{"parts": [{"text": prompt}]}]}
-            try:
-                res = await client.post(url, json=payload)
-                if res.status_code == 200:
-                    data = res.json()
-                    return data["candidates"][0]["content"]["parts"][0]["text"]
-                return f"Erro Gemini: {res.text}"
-            except Exception as e: return f"Erro Gemini: {e}"
-
-        elif provider == "ollama":
-            host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
-            model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
-            try:
-                res = await client.post(f"{host}/api/generate", json={
-                    "model": model, "prompt": prompt, "stream": False
-                })
-                return res.json().get("response", "")
-            except Exception as e: return f"Erro Ollama: {e}"
+    """Invoca o provedor de LLM centralizado em llm_providers."""
+    # Garante o modelo gemini-2.5-flash como padrão para o agente Legado
+    model = cfg.get("model") or "gemini-2.5-flash"
+    if provider == "ollama":
+        model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
 
-    return "Provedor desconhecido."
+    return await call_llm(provider, model, prompt)
 
 def query_agent(prompt: str, override_provider=None, chat_history=None) -> str:
     """Wrapper síncrono para query_agent_async."""
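
Note (reviewer addition, not part of the patch): the rewritten get_llm_response_async now delegates all provider handling to call_llm in the new llm_providers module, which is not included in this diff. The sketch below is only a guess at what such a centralized helper could look like, reusing the Gemini and Ollama request shapes from the removed code; the signature call_llm(provider, model, prompt) is taken from the call site above, and everything else (error handling, return values) is assumed.

# Hypothetical sketch of llm_providers.call_llm -- illustrative only, the real
# module is not part of this patch.
import os
import httpx


async def call_llm(provider: str, model: str, prompt: str) -> str:
    """Send a prompt to the selected provider and return the text response."""
    async with httpx.AsyncClient(timeout=60) as client:
        if provider == "gemini":
            # Same REST endpoint and payload shape as the code removed above,
            # but with the model name passed in instead of hard-coded.
            api_key = os.getenv("GEMINI_API_KEY")
            url = (
                "https://generativelanguage.googleapis.com/v1beta/models/"
                f"{model}:generateContent?key={api_key}"
            )
            res = await client.post(url, json={"contents": [{"parts": [{"text": prompt}]}]})
            res.raise_for_status()
            return res.json()["candidates"][0]["content"]["parts"][0]["text"]
        if provider == "ollama":
            # Same /api/generate call as the removed Ollama branch.
            host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
            res = await client.post(
                f"{host}/api/generate",
                json={"model": model, "prompt": prompt, "stream": False},
            )
            res.raise_for_status()
            return res.json().get("response", "")
        return f"Unknown provider: {provider}"

With a helper of this shape, the agent's call `await call_llm(provider, model, prompt)` works for both providers without ai_agent.py knowing any HTTP details.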