🚀 Auto-deploy: BotVPS atualizado em 02/05/2026 15:37:40
This commit is contained in:
@@ -88,6 +88,13 @@ LLM_PROVIDERS = {
|
||||
"default": "qwen/qwen-2.5-72b-instruct",
|
||||
"endpoint": "https://openrouter.ai/api/v1"
|
||||
},
|
||||
"minimax": {
|
||||
"name": "MiniMax (Hermes)",
|
||||
"type": "api",
|
||||
"models": ["abab7-preview", "abab6.5s-chat", "minimax-text-01"],
|
||||
"default": "abab7-preview",
|
||||
"endpoint": "https://api.minimax.io/v1/text/chatcompletion_v2"
|
||||
},
|
||||
"ollama": {
|
||||
"name": "Ollama (Local)",
|
||||
"type": "local",
|
||||
@@ -166,7 +173,8 @@ def get_api_key(provider: str) -> str:
|
||||
"openai": "OPENAI_API_KEY",
|
||||
"anthropic": "ANTHROPIC_API_KEY",
|
||||
"gemini": "GEMINI_API_KEY",
|
||||
"openrouter": "OPENROUTER_API_KEY"
|
||||
"openrouter": "OPENROUTER_API_KEY",
|
||||
"minimax": "MINIMAX_API_KEY"
|
||||
}
|
||||
|
||||
# 3.1 Busca específica do provider
|
||||
@@ -248,6 +256,8 @@ async def call_llm(provider: str, model: str, prompt: str, system_prompt: str =
|
||||
res = await _call_ollama_async(model, prompt, system_prompt)
|
||||
elif provider == "openrouter":
|
||||
res = await _call_openrouter_async(model, prompt, system_prompt)
|
||||
elif provider == "minimax":
|
||||
res = await _call_minimax_async(model, prompt, system_prompt)
|
||||
else:
|
||||
return {"content": f"Erro: Provider '{provider}' não suportado.", "usage": {}}
|
||||
|
||||
@@ -339,6 +349,45 @@ async def _call_gemini_async(model: str, prompt: str, system_prompt: str = None)
|
||||
except Exception as e:
|
||||
return f"Erro Gemini: {str(e)}"
|
||||
|
||||
async def _call_minimax_async(model: str, prompt: str, system_prompt: str = None) -> dict:
    """Asynchronously call the MiniMax chat-completion V2 API via httpx.

    Builds an OpenAI-style message list (optional system turn followed by
    the user prompt), POSTs it to the MiniMax endpoint, and normalizes the
    reply into a dict with "content" and "usage" keys ("model" is added on
    success). Failures — non-200 status, unexpected payload shape, or any
    raised exception — are never propagated; they are reported inside the
    returned "content" string instead, matching the other provider helpers.
    """
    token = get_api_key("minimax")
    endpoint = "https://api.minimax.io/v1/text/chatcompletion_v2"

    # Optional system turn first, then the user turn.
    chat = [{"role": "system", "content": system_prompt}] if system_prompt else []
    chat.append({"role": "user", "content": prompt})

    body = {
        "model": model,
        "messages": chat,
        "tools": [],
        "tool_choice": "none",
        "stream": False,
    }
    request_headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }

    try:
        async with httpx.AsyncClient() as session:
            reply = await session.post(
                endpoint, json=body, headers=request_headers, timeout=120
            )
            # Guard clause: surface HTTP-level failures immediately.
            if reply.status_code != 200:
                return {"content": f"Erro MiniMax {reply.status_code}: {reply.text}", "usage": {}}

            parsed = reply.json()
            # MiniMax V2 structure
            if not ("choices" in parsed and len(parsed["choices"]) > 0):
                return {"content": f"Erro MiniMax (Resposta inesperada): {json.dumps(parsed)}", "usage": {}}

            first_choice = parsed["choices"][0]
            return {
                "content": first_choice["message"]["content"],
                "usage": parsed.get("usage", {}),
                "model": parsed.get("base_resp", {}).get("model") or model,
            }
    except Exception as exc:
        return {"content": f"Erro MiniMax: {str(exc)}", "usage": {}}
|
||||
|
||||
async def _call_openai_async(model: str, prompt: str, system_prompt: str = None) -> str:
|
||||
"""Chama API da OpenAI via httpx (async)."""
|
||||
api_key = get_api_key("openai")
|
||||
|
||||
Reference in New Issue
Block a user