🚀 Auto-deploy: BotVPS updated on 24/03/2026 11:24:05

2026-03-24 11:24:05 +00:00
parent 9581def314
commit 11e41e44be


@@ -4,34 +4,17 @@ import httpx
import asyncio
import json
from tools import AVAILABLE_TOOLS
from llm_providers import call_llm, get_available_models, get_planner_llm
from config import get_config
async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
    """Invokes the configured LLM provider (async)."""
    async with httpx.AsyncClient(timeout=60) as client:
        if provider == "gemini":
            api_key = cfg.get("gemini_api_key") or os.getenv("GEMINI_API_KEY")
            url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"
            payload = {"contents": [{"parts": [{"text": prompt}]}]}
            try:
                res = await client.post(url, json=payload)
                if res.status_code == 200:
                    data = res.json()
                    return data["candidates"][0]["content"]["parts"][0]["text"]
                return f"Erro Gemini: {res.text}"
            except Exception as e: return f"Erro Gemini: {e}"
        elif provider == "ollama":
            host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
            model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
            try:
                res = await client.post(f"{host}/api/generate", json={
                    "model": model, "prompt": prompt, "stream": False
                })
                return res.json().get("response", "")
            except Exception as e: return f"Erro Ollama: {e}"
"""Invoca o provedor de LLM centralizado em llm_providers."""
# Garante o modelo gemini-2.5-flash como padrão para o agente Legado
model = cfg.get("model") or "gemini-2.5-flash"
if provider == "ollama":
model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
return "Provedor desconhecido."
return await call_llm(provider, model, prompt)

def query_agent(prompt: str, override_provider=None, chat_history=None) -> str:
    """Synchronous wrapper for query_agent_async."""