Refactoring

2026-03-23 23:38:56 +00:00
parent 8002262cf7
commit b7e6239216
16 changed files with 2290 additions and 4321 deletions


@@ -1,11 +1,7 @@
# ============================================================
# LLM_PROVIDERS.PY - LLM Provider Abstraction
# Supports: Gemini, OpenAI, Anthropic, Ollama (local)
# ============================================================
import os
import requests
import httpx
import json
import asyncio
from typing import Optional, Dict, List
# ============================================================
@@ -117,7 +113,6 @@ def set_executor(provider: str = None, model: str = None) -> dict:
save_config(cfg)
return cfg["orchestrator"].get("executor", {"provider": "ollama", "model": "llama3.2:1b"})
return cfg["orchestrator"]["executor"]
def set_api_key(provider: str, key: str):
"""Armazena API key de um provider."""
@@ -152,28 +147,29 @@ def get_api_key(provider: str) -> str:
# OLLAMA DISCOVERY
# ============================================================
def list_ollama_models() -> List[str]:
"""Busca modelos disponíveis no Ollama."""
async def list_ollama_models() -> List[str]:
"""Busca modelos disponíveis no Ollama em modo async."""
try:
endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
response = requests.get(f"{endpoint}/api/tags", timeout=5)
if response.status_code == 200:
models = [m["name"] for m in response.json().get("models", [])]
LLM_PROVIDERS["ollama"]["models"] = models
return models
async with httpx.AsyncClient() as client:
response = await client.get(f"{endpoint}/api/tags", timeout=5)
if response.status_code == 200:
models = [m["name"] for m in response.json().get("models", [])]
LLM_PROVIDERS["ollama"]["models"] = models
return models
except Exception as e:
print(f"Erro ao buscar modelos Ollama: {e}")
return []
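# Editor's note (usage sketch, not part of this commit): list_ollama_models is
# now a coroutine, so existing callers must await it, e.g.
#     models = await list_ollama_models()          # inside async code
#     models = asyncio.run(list_ollama_models())   # from a sync entry point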
def get_available_models(provider: str = None) -> List[Dict]:
"""Retorna modelos disponíveis para um provider ou todos."""
async def get_available_models(provider: str = None) -> List[Dict]:
"""Retorna modelos disponíveis para um provider ou todos (async)."""
if provider:
p = LLM_PROVIDERS.get(provider)
if not p:
return []
if p["type"] == "local" and provider == "ollama":
models = list_ollama_models()
models = await list_ollama_models()
return [{"provider": provider, "models": models}]
else:
return [{"provider": provider, "models": p.get("models", [p["default"]])}]
@@ -182,7 +178,7 @@ def get_available_models(provider: str = None) -> List[Dict]:
result = []
for prov_id, prov in LLM_PROVIDERS.items():
if prov_id == "ollama":
models = list_ollama_models()
models = await list_ollama_models()
result.append({"provider": prov_id, "name": prov["name"], "models": models})
else:
result.append({"provider": prov_id, "name": prov["name"], "models": prov.get("models", [prov["default"]])})
@@ -190,42 +186,25 @@ def get_available_models(provider: str = None) -> List[Dict]:
return result
# ============================================================
# LLM CALL FUNCTIONS
# ASYNC LLM CALL FUNCTIONS
# ============================================================
def call_llm(provider: str, model: str, prompt: str, system_prompt: str = None, **kwargs) -> str:
"""
Calls the specified LLM.
Args:
provider: Provider name (gemini, openai, anthropic, ollama)
model: Model name
prompt: User prompt
system_prompt: System prompt (optional)
Returns:
The LLM response as a string
"""
async def call_llm(provider: str, model: str, prompt: str, system_prompt: str = None, **kwargs) -> str:
"""Suporte universal async para chamadas de LLM."""
if provider == "gemini":
return _call_gemini(model, prompt, system_prompt)
return await _call_gemini_async(model, prompt, system_prompt)
elif provider == "openai":
return _call_openai(model, prompt, system_prompt)
return await _call_openai_async(model, prompt, system_prompt)
elif provider == "anthropic":
return _call_anthropic(model, prompt, system_prompt)
return await _call_anthropic_async(model, prompt, system_prompt)
elif provider == "ollama":
return _call_ollama(model, prompt, system_prompt)
return await _call_ollama_async(model, prompt, system_prompt)
else:
return f"Erro: Provider '{provider}' não suportado."
# ----------------------------------------
# GEMINI
# ----------------------------------------
def _call_gemini(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama API do Google Gemini."""
async def _call_gemini_async(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama API do Google Gemini via httpx (async)."""
api_key = get_api_key("gemini")
if not api_key:
api_key = os.getenv("GEMINI_API_KEY", "")
url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}"
contents = [{"parts": [{"text": prompt}]}]
@@ -235,22 +214,17 @@ def _call_gemini(model: str, prompt: str, system_prompt: str = None) -> str:
payload = {"contents": contents}
try:
res = requests.post(url, json=payload, timeout=60)
if res.status_code == 200:
return res.json()["candidates"][0]["content"]["parts"][0]["text"]
return f"Erro Gemini: {res.status_code} - {res.text}"
async with httpx.AsyncClient() as client:
res = await client.post(url, json=payload, timeout=60)
if res.status_code == 200:
return res.json()["candidates"][0]["content"]["parts"][0]["text"]
return f"Erro Gemini: {res.status_code} - {res.text}"
except Exception as e:
return f"Erro Gemini: {str(e)}"
# ----------------------------------------
# OPENAI
# ----------------------------------------
def _call_openai(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama API da OpenAI."""
async def _call_openai_async(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama API da OpenAI via httpx (async)."""
api_key = get_api_key("openai")
if not api_key:
api_key = os.getenv("OPENAI_API_KEY", "")
url = f"https://api.openai.com/v1/chat/completions"
messages = []
@@ -258,33 +232,21 @@ def _call_openai(model: str, prompt: str, system_prompt: str = None) -> str:
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": prompt})
payload = {
"model": model,
"messages": messages,
"temperature": 0.7
}
payload = {"model": model, "messages": messages, "temperature": 0.7}
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
try:
res = requests.post(url, json=payload, headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}, timeout=60)
if res.status_code == 200:
return res.json()["choices"][0]["message"]["content"]
return f"Erro OpenAI: {res.status_code} - {res.text}"
async with httpx.AsyncClient() as client:
res = await client.post(url, json=payload, headers=headers, timeout=60)
if res.status_code == 200:
return res.json()["choices"][0]["message"]["content"]
return f"Erro OpenAI: {res.status_code} - {res.text}"
except Exception as e:
return f"Erro OpenAI: {str(e)}"
# ----------------------------------------
# ANTHROPIC
# ----------------------------------------
def _call_anthropic(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama API da Anthropic (Claude)."""
async def _call_anthropic_async(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama API da Anthropic via httpx (async)."""
api_key = get_api_key("anthropic")
if not api_key:
api_key = os.getenv("ANTHROPIC_API_KEY", "")
url = "https://api.anthropic.com/v1/messages"
headers = {
@@ -298,24 +260,40 @@ def _call_anthropic(model: str, prompt: str, system_prompt: str = None) -> str:
"max_tokens": 4096,
"messages": [{"role": "user", "content": prompt}]
}
if system_prompt:
payload["system"] = system_prompt
if system_prompt: payload["system"] = system_prompt
try:
res = requests.post(url, json=payload, headers=headers, timeout=60)
if res.status_code == 200:
return res.json()["content"][0]["text"]
return f"Erro Anthropic: {res.status_code} - {res.text}"
async with httpx.AsyncClient() as client:
res = await client.post(url, json=payload, headers=headers, timeout=60)
if res.status_code == 200:
return res.json()["content"][0]["text"]
return f"Erro Anthropic: {res.status_code} - {res.text}"
except Exception as e:
return f"Erro Anthropic: {str(e)}"
# ----------------------------------------
# OLLAMA (LOCAL)
# ----------------------------------------
async def _call_ollama_async(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama Ollama local via httpx (async)."""
endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
payload = {
"model": model,
"prompt": prompt,
"stream": False,
"options": {"num_ctx": 4096}
}
if system_prompt: payload["system"] = system_prompt
try:
async with httpx.AsyncClient() as client:
res = await client.post(f"{endpoint}/api/generate", json=payload, timeout=180)
if res.status_code == 200:
return res.json().get("response", "")
return f"Erro Ollama: {res.status_code} - {res.text}"
except Exception as e:
return f"Erro Ollama: {str(e)}"
def check_ollama_connection() -> dict:
"""Verifica se Ollama está acessível."""
"""Versão síncrona mantida para compatibilidade rápida de status."""
import requests
endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
try:
res = requests.get(f"{endpoint}/api/tags", timeout=10)
@@ -323,62 +301,34 @@ def check_ollama_connection() -> dict:
models = [m.get("name") for m in res.json().get("models", [])]
return {"status": "ok", "models": models, "endpoint": endpoint}
return {"status": "error", "code": res.status_code, "endpoint": endpoint}
except requests.exceptions.Timeout:
return {"status": "timeout", "endpoint": endpoint}
except requests.exceptions.ConnectionError:
return {"status": "unreachable", "endpoint": endpoint}
except Exception as e:
return {"status": "error", "message": str(e), "endpoint": endpoint}
def _call_ollama(model: str, prompt: str, system_prompt: str = None) -> str:
"""Chama Ollama local."""
endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
payload = {
"model": model,
"prompt": prompt,
"stream": False,
"options": {"num_ctx": 4096}
}
if system_prompt:
payload["system"] = system_prompt
try:
res = requests.post(f"{endpoint}/api/generate", json=payload, timeout=180)
if res.status_code == 200:
return res.json().get("response", "")
return f"Erro Ollama: {res.status_code} - {res.text}"
except requests.exceptions.Timeout:
return f"[TIMEOUT] Ollama não respondeu em 180s. Verifique se o modelo está carregado em {endpoint}"
except requests.exceptions.ConnectionError:
return f"[CONNECTION ERROR] Não conseguiu conectar ao Ollama em {endpoint}. Verifique se o container Ollama está na mesma rede Docker."
except Exception as e:
return f"Erro Ollama: {str(e)}"
# ============================================================
# HELPER FUNCTIONS
# PLANNER & EXECUTOR WRAPPERS (PROMETE SER ASYNC)
# ============================================================
def get_planner_llm() -> tuple:
"""Retorna provider e modelo do planner configurado."""
cfg = get_orchestrator_config()
planner = cfg.get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
return planner["provider"], planner["model"]
def get_executor_llm() -> tuple:
"""Retorna provider e modelo do executor configurado."""
cfg = get_orchestrator_config()
executor = cfg.get("executor", {"provider": "ollama", "model": "llama3.2:1b"})
return executor["provider"], executor["model"]
def call_planner(prompt: str, system_prompt: str = None) -> str:
"""Chama o LLM do planner com a config atual."""
async def call_planner_async(prompt: str, system_prompt: str = None) -> str:
provider, model = get_planner_llm()
return call_llm(provider, model, prompt, system_prompt)
return await call_llm(provider, model, prompt, system_prompt)
async def call_executor_async(prompt: str, system_prompt: str = None) -> str:
provider, model = get_executor_llm()
return await call_llm(provider, model, prompt, system_prompt)
# --- BACKWARD COMPATIBILITY SHIMS (SYNC WRAPPERS) ---
def call_planner(prompt: str, system_prompt: str = None) -> str:
return asyncio.run(call_planner_async(prompt, system_prompt))
def call_executor(prompt: str, system_prompt: str = None) -> str:
"""Chama o LLM do executor com a config atual."""
provider, model = get_executor_llm()
return call_llm(provider, model, prompt, system_prompt)
return asyncio.run(call_executor_async(prompt, system_prompt))
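# Editor's note (not part of this commit): asyncio.run() raises RuntimeError when
# invoked from a thread that already has a running event loop, so the sync shims
# above are only safe from synchronous entry points. A minimal smoke test,
# assuming an Ollama executor is configured locally:
if __name__ == "__main__":
    async def _demo():
        print("Ollama status:", check_ollama_connection())
        print("Ollama models:", await list_ollama_models())
        print("Executor reply:", await call_executor_async("Reply with a single word: ok"))
    asyncio.run(_demo())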