import os
import httpx
import json
import asyncio
from typing import Optional, Dict, List

# ============================================================
# PROVIDER CONFIGURATION
# ============================================================

LLM_PROVIDERS = {
    "gemini": {
        "name": "Google Gemini",
        "type": "api",
        "models": ["gemini-2.5-flash", "gemini-2.0-pro", "gemini-1.5-flash"],
        "default": "gemini-2.5-flash",
        "endpoint": "https://generativelanguage.googleapis.com/v1beta/models"
    },
    "openai": {
        "name": "OpenAI GPT",
        "type": "api",
        "models": ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"],
        "default": "gpt-4o",
        "endpoint": "https://api.openai.com/v1"
    },
    "anthropic": {
        "name": "Anthropic Claude",
        "type": "api",
        "models": ["claude-3-5-sonnet-20241022", "claude-3-5-haiku-20241022", "claude-3-opus-20240229"],
        "default": "claude-3-5-sonnet-20241022",
        "endpoint": "https://api.anthropic.com/v1"
    },
    "ollama": {
        "name": "Ollama (Local)",
        "type": "local",
        "endpoint": os.getenv("OLLAMA_HOST", "http://ollama:11434"),
        "models": None,  # discovered dynamically via list_ollama_models()
        "default": "llama3.2:1b"
    }
}

# ============================================================
# CONFIG MANAGER
# ============================================================

CONFIG_FILE = "/app/data/config.json"


def get_config() -> dict:
    """Load the orchestrator configuration from disk, falling back to defaults."""
    if not os.path.exists("/app/data"):
        os.makedirs("/app/data", exist_ok=True)
    if os.path.exists(CONFIG_FILE):
        try:
            with open(CONFIG_FILE, "r") as f:
                return json.load(f)
        except Exception:
            pass
    return {
        "orchestrator": {
            "planner": {"provider": "gemini", "model": "gemini-2.5-flash"},
            "executor": {"provider": "ollama", "model": "llama3.2:1b"}
        },
        "api_keys": {
            "openai": "",
            "anthropic": "",
            "gemini": ""
        }
    }


def save_config(cfg: dict):
    """Persist the orchestrator configuration to disk."""
    if not os.path.exists("/app/data"):
        os.makedirs("/app/data", exist_ok=True)
    with open(CONFIG_FILE, "w") as f:
        json.dump(cfg, f, indent=4)


def get_orchestrator_config() -> dict:
    """Return the orchestrator section of the configuration."""
    cfg = get_config()
    return cfg.get("orchestrator", {
        "planner": {"provider": "gemini", "model": "gemini-2.5-flash"},
        "executor": {"provider": "ollama", "model": "llama3.2:1b"}
    })


def set_planner(provider: Optional[str] = None, model: Optional[str] = None) -> dict:
    """Set the planner's provider and model."""
    cfg = get_config()
    if "orchestrator" not in cfg:
        cfg["orchestrator"] = {}
    if provider:
        cfg["orchestrator"]["planner"] = {
            "provider": provider,
            "model": model or LLM_PROVIDERS[provider]["default"]
        }
        save_config(cfg)
    return cfg["orchestrator"].get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})


def set_executor(provider: Optional[str] = None, model: Optional[str] = None) -> dict:
    """Set the executor's provider and model."""
    cfg = get_config()
    if "orchestrator" not in cfg:
        cfg["orchestrator"] = {}
    if provider:
        cfg["orchestrator"]["executor"] = {
            "provider": provider,
            "model": model or LLM_PROVIDERS[provider]["default"]
        }
        save_config(cfg)
    return cfg["orchestrator"].get("executor", {"provider": "ollama", "model": "llama3.2:1b"})


def set_api_key(provider: str, key: str):
    """Store the API key for a provider."""
    cfg = get_config()
    if "api_keys" not in cfg:
        cfg["api_keys"] = {}
    cfg["api_keys"][provider] = key
    save_config(cfg)


def get_api_key(provider: str) -> str:
    """Look up a provider's API key (config file first, then environment variable)."""
    cfg = get_config()
    # Config file takes precedence
    api_keys = cfg.get("api_keys", {})
    if api_keys.get(provider):
        return api_keys[provider]
    # Fall back to environment variables
    env_vars = {
        "openai": "OPENAI_API_KEY",
        "anthropic": "ANTHROPIC_API_KEY",
        "gemini": "GEMINI_API_KEY"
    }
    if provider in env_vars:
        return os.getenv(env_vars[provider], "")
    return ""


# ============================================================
# OLLAMA DISCOVERY
# ============================================================

async def list_ollama_models() -> List[str]:
    """Fetch the models available on the Ollama server (async)."""
    try:
        endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
        async with httpx.AsyncClient() as client:
            response = await client.get(f"{endpoint}/api/tags", timeout=5)
            if response.status_code == 200:
                models = [m["name"] for m in response.json().get("models", [])]
                LLM_PROVIDERS["ollama"]["models"] = models
                return models
    except Exception as e:
        print(f"Error fetching Ollama models: {e}")
    return []


async def get_available_models(provider: Optional[str] = None) -> List[Dict]:
    """Return the available models for one provider, or for all providers (async)."""
    if provider:
        p = LLM_PROVIDERS.get(provider)
        if not p:
            return []
        if p["type"] == "local" and provider == "ollama":
            models = await list_ollama_models()
            return [{"provider": provider, "models": models}]
        else:
            return [{"provider": provider, "models": p.get("models", [p["default"]])}]
    # All providers
    result = []
    for prov_id, prov in LLM_PROVIDERS.items():
        if prov_id == "ollama":
            models = await list_ollama_models()
            result.append({"provider": prov_id, "name": prov["name"], "models": models})
        else:
            result.append({"provider": prov_id, "name": prov["name"], "models": prov.get("models", [prov["default"]])})
    return result


# ============================================================
# ASYNC LLM CALL FUNCTIONS
# ============================================================

async def call_llm(provider: str, model: str, prompt: str, system_prompt: Optional[str] = None, **kwargs) -> str:
    """Provider-agnostic async entry point for LLM calls."""
    if provider == "gemini":
        return await _call_gemini_async(model, prompt, system_prompt)
    elif provider == "openai":
        return await _call_openai_async(model, prompt, system_prompt)
    elif provider == "anthropic":
        return await _call_anthropic_async(model, prompt, system_prompt)
    elif provider == "ollama":
        return await _call_ollama_async(model, prompt, system_prompt)
    else:
        return f"Error: provider '{provider}' is not supported."
async def _call_gemini_async(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Call the Google Gemini API via httpx (async)."""
    api_key = get_api_key("gemini")
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}"
    payload = {"contents": [{"parts": [{"text": prompt}]}]}
    if system_prompt:
        # Gemini takes the system prompt as a top-level systemInstruction field
        payload["systemInstruction"] = {"parts": [{"text": system_prompt}]}
    try:
        async with httpx.AsyncClient() as client:
            res = await client.post(url, json=payload, timeout=60)
            if res.status_code == 200:
                return res.json()["candidates"][0]["content"]["parts"][0]["text"]
            return f"Gemini error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"Gemini error: {str(e)}"


async def _call_openai_async(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Call the OpenAI API via httpx (async)."""
    api_key = get_api_key("openai")
    url = "https://api.openai.com/v1/chat/completions"
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})
    payload = {"model": model, "messages": messages, "temperature": 0.7}
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    try:
        async with httpx.AsyncClient() as client:
            res = await client.post(url, json=payload, headers=headers, timeout=60)
            if res.status_code == 200:
                return res.json()["choices"][0]["message"]["content"]
            return f"OpenAI error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"OpenAI error: {str(e)}"


async def _call_anthropic_async(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Call the Anthropic API via httpx (async)."""
    api_key = get_api_key("anthropic")
    url = "https://api.anthropic.com/v1/messages"
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json"
    }
    payload = {
        "model": model,
        "max_tokens": 4096,
        "messages": [{"role": "user", "content": prompt}]
    }
    if system_prompt:
        payload["system"] = system_prompt
    try:
        async with httpx.AsyncClient() as client:
            res = await client.post(url, json=payload, headers=headers, timeout=60)
            if res.status_code == 200:
                return res.json()["content"][0]["text"]
            return f"Anthropic error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"Anthropic error: {str(e)}"


async def _call_ollama_async(model: str, prompt: str, system_prompt: Optional[str] = None) -> str:
    """Call a local Ollama server via httpx (async)."""
    endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"num_ctx": 4096}
    }
    if system_prompt:
        payload["system"] = system_prompt
    try:
        async with httpx.AsyncClient() as client:
            res = await client.post(f"{endpoint}/api/generate", json=payload, timeout=180)
            if res.status_code == 200:
                return res.json().get("response", "")
            return f"Ollama error: {res.status_code} - {res.text}"
    except Exception as e:
        return f"Ollama error: {str(e)}"


def check_ollama_connection() -> dict:
    """Synchronous variant kept for quick status checks (backward compatibility)."""
    import requests
    endpoint = LLM_PROVIDERS["ollama"]["endpoint"]
    try:
        res = requests.get(f"{endpoint}/api/tags", timeout=10)
        if res.status_code == 200:
            models = [m.get("name") for m in res.json().get("models", [])]
            return {"status": "ok", "models": models, "endpoint": endpoint}
        return {"status": "error", "code": res.status_code, "endpoint": endpoint}
    except Exception as e:
        return {"status": "error", "message": str(e), "endpoint": endpoint}
# ============================================================
# PLANNER & EXECUTOR WRAPPERS (ASYNC-FIRST)
# ============================================================

def get_planner_llm() -> tuple:
    cfg = get_orchestrator_config()
    planner = cfg.get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
    return planner["provider"], planner["model"]


def get_executor_llm() -> tuple:
    cfg = get_orchestrator_config()
    executor = cfg.get("executor", {"provider": "ollama", "model": "llama3.2:1b"})
    return executor["provider"], executor["model"]


async def call_planner_async(prompt: str, system_prompt: Optional[str] = None) -> str:
    provider, model = get_planner_llm()
    return await call_llm(provider, model, prompt, system_prompt)


async def call_executor_async(prompt: str, system_prompt: Optional[str] = None) -> str:
    provider, model = get_executor_llm()
    return await call_llm(provider, model, prompt, system_prompt)


# --- BACKWARD COMPATIBILITY SHIMS (SYNC WRAPPERS) ---

def call_planner(prompt: str, system_prompt: Optional[str] = None) -> str:
    return asyncio.run(call_planner_async(prompt, system_prompt))


def call_executor(prompt: str, system_prompt: Optional[str] = None) -> str:
    return asyncio.run(call_executor_async(prompt, system_prompt))
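

# ------------------------------------------------------------
# Usage sketch (not part of the module API): a minimal manual
# smoke test, assuming the /app/data volume is writable, an
# Ollama server is reachable at OLLAMA_HOST, and GEMINI_API_KEY
# is set. The prompts and model choices below are illustrative only.
if __name__ == "__main__":
    async def _demo():
        # Point the planner at Gemini and the executor at the local Ollama model.
        set_planner("gemini", "gemini-2.5-flash")
        set_executor("ollama", "llama3.2:1b")

        # Discover what each provider exposes (Ollama is queried live).
        for entry in await get_available_models():
            print(entry["provider"], "->", entry["models"])

        # Route one prompt through each role.
        print(await call_planner_async("List three steps to deploy a Flask app."))
        print(await call_executor_async("Write a haiku about containers."))

    asyncio.run(_demo())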