BotVPS/llm_providers.py

# ============================================================
# LLM_PROVIDERS.PY - LLM Provider Abstraction
# Supports: Gemini, OpenAI, Anthropic, Ollama (local)
# ============================================================
import os
import requests
import json
from typing import Optional, Dict, List
# ============================================================
# PROVIDER CONFIGURATION
# ============================================================
LLM_PROVIDERS = {
    "gemini": {
        "name": "Google Gemini",
        "type": "api",
        "models": ["gemini-2.5-flash", "gemini-2.0-pro", "gemini-1.5-flash"],
        "default": "gemini-2.5-flash",
        "endpoint": "https://generativelanguage.googleapis.com/v1beta/models"
    },
    "openai": {
        "name": "OpenAI GPT",
        "type": "api",
        "models": ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"],
        "default": "gpt-4o",
        "endpoint": "https://api.openai.com/v1"
    },
    "anthropic": {
        "name": "Anthropic Claude",
        "type": "api",
        "models": ["claude-3-5-sonnet-20241022", "claude-3-5-haiku-20241022", "claude-3-opus-20240229"],
        "default": "claude-3-5-sonnet-20241022",
        "endpoint": "https://api.anthropic.com/v1"
    },
    "ollama": {
        "name": "Ollama (Local)",
        "type": "local",
        "endpoint": os.getenv("OLLAMA_HOST", "http://localhost:11434"),
        "models": None,
        "default": "qwen2.5-coder:1.5b"
    }
}
# ============================================================
# CONFIG MANAGER
# ============================================================
CONFIG_FILE = "/app/data/config.json"
def get_config() -> dict:
    """Loads the orchestrator configuration."""
    if not os.path.exists("/app/data"):
        os.makedirs("/app/data", exist_ok=True)
    if os.path.exists(CONFIG_FILE):
        try:
            with open(CONFIG_FILE, "r") as f:
                return json.load(f)
        except Exception:
            pass
    return {
        "orchestrator": {
            "planner": {"provider": "gemini", "model": "gemini-2.5-flash"},
            "executor": {"provider": "ollama", "model": "qwen2.5-coder:1.5b"}
        },
        "api_keys": {
            "openai": "",
            "anthropic": "",
            "gemini": ""
        }
    }
def save_config(cfg: dict):
    """Saves the orchestrator configuration."""
    if not os.path.exists("/app/data"):
        os.makedirs("/app/data", exist_ok=True)
    with open(CONFIG_FILE, "w") as f:
        json.dump(cfg, f, indent=4)
def get_orchestrator_config() -> dict:
    """Returns the orchestrator config."""
    cfg = get_config()
    return cfg.get("orchestrator", {
        "planner": {"provider": "gemini", "model": "gemini-2.5-flash"},
        "executor": {"provider": "ollama", "model": "qwen2.5-coder:1.5b"}
    })
def set_planner(provider: Optional[str] = None, model: Optional[str] = None) -> dict:
    """Sets the planner provider and model."""
    cfg = get_config()
    if "orchestrator" not in cfg:
        cfg["orchestrator"] = {}
    if provider:
        cfg["orchestrator"]["planner"] = {
            "provider": provider,
            "model": model or LLM_PROVIDERS[provider]["default"]
        }
    save_config(cfg)
    return cfg["orchestrator"].get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
def set_executor(provider: Optional[str] = None, model: Optional[str] = None) -> dict:
    """Sets the executor provider and model."""
    cfg = get_config()
    if "orchestrator" not in cfg:
        cfg["orchestrator"] = {}
    if provider:
        cfg["orchestrator"]["executor"] = {
            "provider": provider,
            "model": model or LLM_PROVIDERS[provider]["default"]
        }
    save_config(cfg)
    return cfg["orchestrator"].get("executor", {"provider": "ollama", "model": "qwen2.5-coder:1.5b"})
def set_api_key(provider: str, key: str):
    """Stores the API key for a provider."""
    cfg = get_config()
    if "api_keys" not in cfg:
        cfg["api_keys"] = {}
    cfg["api_keys"][provider] = key
    save_config(cfg)
def get_api_key(provider: str) -> str:
    """Looks up a provider's API key (config file or environment variable)."""
    cfg = get_config()
    # Check the config file first
    api_keys = cfg.get("api_keys", {})
    if api_keys.get(provider):
        return api_keys[provider]
    # Fall back to an environment variable
    env_vars = {
        "openai": "OPENAI_API_KEY",
        "anthropic": "ANTHROPIC_API_KEY",
        "gemini": "GEMINI_API_KEY"
    }
    if provider in env_vars:
        return os.getenv(env_vars[provider], "")
    return ""
# ============================================================
# OLLAMA DISCOVERY
# ============================================================
def list_ollama_models() -> List[str]:
    """Fetches the models available on the Ollama server."""
    try:
        endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
        response = requests.get(f"{endpoint}/api/tags", timeout=5)
        if response.status_code == 200:
            models = [m["name"] for m in response.json().get("models", [])]
            LLM_PROVIDERS["ollama"]["models"] = models
            return models
    except Exception as e:
        print(f"Error fetching Ollama models: {e}")
    return []
def get_available_models(provider: Optional[str] = None) -> List[Dict]:
    """Returns the available models for one provider, or for all providers."""
    if provider:
        p = LLM_PROVIDERS.get(provider)
        if not p:
            return []
        if p["type"] == "local" and provider == "ollama":
            models = list_ollama_models()
            return [{"provider": provider, "models": models}]
        else:
            return [{"provider": provider, "models": p.get("models", [p["default"]])}]
    # All providers
    result = []
    for prov_id, prov in LLM_PROVIDERS.items():
        if prov_id == "ollama":
            models = list_ollama_models()
            result.append({"provider": prov_id, "name": prov["name"], "models": models})
        else:
            result.append({"provider": prov_id, "name": prov["name"], "models": prov.get("models", [prov["default"]])})
    return result
# ============================================================
# LLM CALL FUNCTIONS
# ============================================================
def call_llm(provider: str, model: str, prompt: str, system_prompt: Optional[str] = None, **kwargs) -> str:
    """
    Calls the specified LLM.
    Args:
        provider: Provider name (gemini, openai, anthropic, ollama)
        model: Model name
        prompt: User prompt
        system_prompt: System prompt (optional)
    Returns:
        The LLM response as a string
    """
    if provider == "gemini":
        return _call_gemini(model, prompt, system_prompt)
    elif provider == "openai":
        return _call_openai(model, prompt, system_prompt)
    elif provider == "anthropic":
        return _call_anthropic(model, prompt, system_prompt)
    elif provider == "ollama":
        return _call_ollama(model, prompt, system_prompt)
    else:
        return f"Error: provider '{provider}' is not supported."
# ----------------------------------------
# GEMINI
# ----------------------------------------
def _call_gemini(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Calls the Google Gemini API."""
    api_key = get_api_key("gemini")
    if not api_key:
        api_key = os.getenv("GEMINI_API_KEY", "")
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}"
    contents = [{"parts": [{"text": prompt}]}]
    if system_prompt:
        # The system prompt is prepended to the conversation as a model-role turn
        contents.insert(0, {"role": "model", "parts": [{"text": system_prompt}]})
    payload = {"contents": contents}
    try:
        res = requests.post(url, json=payload, timeout=60)
        if res.status_code == 200:
            return res.json()["candidates"][0]["content"]["parts"][0]["text"]
        return f"Gemini error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"Gemini error: {str(e)}"
# ----------------------------------------
# OPENAI
# ----------------------------------------
def _call_openai(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Calls the OpenAI API."""
    api_key = get_api_key("openai")
    if not api_key:
        api_key = os.getenv("OPENAI_API_KEY", "")
    url = "https://api.openai.com/v1/chat/completions"
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})
    payload = {
        "model": model,
        "messages": messages,
        "temperature": 0.7
    }
    try:
        res = requests.post(url, json=payload, headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }, timeout=60)
        if res.status_code == 200:
            return res.json()["choices"][0]["message"]["content"]
        return f"OpenAI error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"OpenAI error: {str(e)}"
# ----------------------------------------
# ANTHROPIC
# ----------------------------------------
def _call_anthropic(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Calls the Anthropic (Claude) API."""
    api_key = get_api_key("anthropic")
    if not api_key:
        api_key = os.getenv("ANTHROPIC_API_KEY", "")
    url = "https://api.anthropic.com/v1/messages"
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json"
    }
    payload = {
        "model": model,
        "max_tokens": 4096,
        "messages": [{"role": "user", "content": prompt}]
    }
    if system_prompt:
        payload["system"] = system_prompt
    try:
        res = requests.post(url, json=payload, headers=headers, timeout=60)
        if res.status_code == 200:
            return res.json()["content"][0]["text"]
        return f"Anthropic error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"Anthropic error: {str(e)}"
# ----------------------------------------
# OLLAMA (LOCAL)
# ----------------------------------------
def _call_ollama(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Calls a local Ollama instance."""
    endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False
    }
    if system_prompt:
        payload["system"] = system_prompt
    try:
        res = requests.post(f"{endpoint}/api/generate", json=payload, timeout=120)
        if res.status_code == 200:
            return res.json().get("response", "")
        return f"Ollama error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"Ollama error: {str(e)}"
# ============================================================
# HELPER FUNCTIONS
# ============================================================
def get_planner_llm() -> tuple:
    """Returns the configured planner provider and model."""
    cfg = get_orchestrator_config()
    planner = cfg.get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
    return planner["provider"], planner["model"]
def get_executor_llm() -> tuple:
    """Returns the configured executor provider and model."""
    cfg = get_orchestrator_config()
    executor = cfg.get("executor", {"provider": "ollama", "model": "qwen2.5-coder:1.5b"})
    return executor["provider"], executor["model"]
def call_planner(prompt: str, system_prompt: Optional[str] = None) -> str:
    """Calls the planner LLM with the current config."""
    provider, model = get_planner_llm()
    return call_llm(provider, model, prompt, system_prompt)
def call_executor(prompt: str, system_prompt: Optional[str] = None) -> str:
    """Calls the executor LLM with the current config."""
    provider, model = get_executor_llm()
    return call_llm(provider, model, prompt, system_prompt)
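# ------------------------------------------------------------
# Usage sketch (not part of the original module; the prompts below
# are illustrative assumptions). It shows how the planner/executor
# pair is typically wired up: pick a provider+model for each role,
# then route prompts through call_planner() / call_executor().
# ------------------------------------------------------------
if __name__ == "__main__":
    # Configure the two roles (model falls back to each provider's default)
    set_planner(provider="gemini", model="gemini-2.5-flash")
    set_executor(provider="ollama", model="qwen2.5-coder:1.5b")
    # List what is currently reachable (Ollama models are discovered live)
    for entry in get_available_models():
        print(entry["provider"], entry["models"])
    # Route a prompt through each configured role
    plan = call_planner("Outline the steps to add a /status command to the bot.")
    print("PLANNER:", plan)
    code = call_executor("Write a Python function that returns server uptime.",
                         system_prompt="Reply with code only.")
    print("EXECUTOR:", code)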