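"""Antigravity VPS Agent: LLM loop with tool calling.

Sends the conversation to the configured LLM provider (Gemini or Ollama),
parses [TOOL:name] arg [/TOOL] blocks from the response, executes the
matching tool from tools.AVAILABLE_TOOLS, feeds the observation back into
the transcript, and repeats until the model answers without a tool call.
"""
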
import os
import re
import asyncio

import httpx

from tools import AVAILABLE_TOOLS
from config import get_config


async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
    """Call the configured LLM provider (async)."""
    async with httpx.AsyncClient(timeout=60) as client:
        if provider == "gemini":
            api_key = cfg.get("gemini_api_key") or os.getenv("GEMINI_API_KEY")
            url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"
            payload = {"contents": [{"parts": [{"text": prompt}]}]}
            try:
                res = await client.post(url, json=payload)
                if res.status_code == 200:
                    data = res.json()
                    # Gemini nests the text under candidates -> content -> parts.
                    return data["candidates"][0]["content"]["parts"][0]["text"]
                return f"Gemini error: {res.text}"
            except Exception as e:
                return f"Gemini error: {e}"
        elif provider == "ollama":
            host = os.getenv("OLLAMA_HOST", "http://ollama:11434")
            model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
            try:
                # /api/generate with stream=False returns a single JSON object.
                res = await client.post(f"{host}/api/generate", json={
                    "model": model, "prompt": prompt, "stream": False
                })
                return res.json().get("response", "")
            except Exception as e:
                return f"Ollama error: {e}"
return "Provedor desconhecido."
|
|
|
|
def query_agent(prompt: str, override_provider=None, chat_history=None) -> str:
    """Synchronous wrapper for query_agent_async.

    Note: asyncio.run() cannot be called from a running event loop; in async
    contexts, await query_agent_async directly.
    """
    return asyncio.run(query_agent_async(prompt, override_provider, chat_history))


async def query_agent_async(prompt: str, override_provider=None, chat_history=None) -> str:
    cfg = get_config()
    provider = override_provider or cfg.get("active_provider", "gemini")
    tools_desc = "\n".join(f"- {k}: {v['description']}" for k, v in AVAILABLE_TOOLS.items())

system_prompt = f"""Você é o Antigravity VPS Agent. Root Admin da VPS do Marcos.
|
|
Responda em PORTUGUÊS. Seja técnico e direto.
|
|
|
|
### FERRAMENTAS:
|
|
{tools_desc}
|
|
|
|
### FORMATO:
|
|
Use [TOOL:nome] arg [/TOOL] para ações.
|
|
Finalize com <REFINED> resumo </REFINED>.
|
|
"""
|
|
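    # Illustrative tool-call round trip (hypothetical; assumes a "shell" tool
    # is registered in AVAILABLE_TOOLS):
    #   Agent:  [TOOL:shell] uptime [/TOOL]
    #   SYSTEM (shell): 14:02 up 3 days, load average: 0.10 ...
    #   Agent:  <REFINED> VPS up 3 days, load is normal. </REFINED>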
history_str = ""
|
|
if chat_history:
|
|
for m in chat_history[-5:]:
|
|
history_str += f"\nUsuário: {m['user']}\nAgente: {m['bot']}\n"
|
|
history_str += f"\nUsuário: {prompt}\n"
|
|
|
|
current_history = history_str
|
|
    for _ in range(10):
        response = await get_llm_response_async(system_prompt + current_history, provider, cfg)
        match = re.search(r"\[TOOL:(.*?)\](.*?)\[/TOOL\]", response, re.I | re.S)

        if match:
            t_name, arg = match.group(1).strip(), match.group(2).strip()
            if t_name in AVAILABLE_TOOLS:
                func = AVAILABLE_TOOLS[t_name]["func"]
                # Tools in tools.py are synchronous (legacy); note this blocks
                # the event loop while the tool runs.
                obs = func(arg) if arg else func()
                current_history += f"\nAgent: {response}\nSYSTEM ({t_name}): {obs}\n"
            else:
                current_history += f"\nAgent: {response}\nSYSTEM: Error: unknown tool.\n"
        else:
            # No tool call: treat the response as the final answer.
            return response

    return "Reasoning-step limit reached."
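

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original module): assumes
    # config and tools are importable and a provider is reachable, i.e.
    # GEMINI_API_KEY is set or an Ollama instance answers at OLLAMA_HOST.
    print(query_agent("Check the VPS uptime."))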