diff --git a/ai_agent.py b/ai_agent.py
index bef9ea1..3b562ec 100644
--- a/ai_agent.py
+++ b/ai_agent.py
@@ -15,7 +15,7 @@ async def get_llm_response_async(prompt: str, provider: str, cfg: dict) -> str:
     elif provider == "ollama":
         model = os.getenv("OLLAMA_MODEL", "llama3.2:1b")
     else:
-        model = cfg.get("model") or "gemini-2.5-flash"
+        model = cfg.get("model") or "qwen/qwen-2.5-72b-instruct"
     return await call_llm(provider, model, prompt)
 
diff --git a/llm_providers.py b/llm_providers.py
index 7b1ca18..c31f558 100644
--- a/llm_providers.py
+++ b/llm_providers.py
@@ -72,7 +72,7 @@ def set_planner(provider: str = None, model: str = None) -> dict:
     }
     save_config(cfg)
 
-    return cfg["orchestrator"].get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
+    return cfg["orchestrator"].get("planner", {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"})
 
 def set_executor(provider: str = None, model: str = None) -> dict:
     """Sets the executor's provider and model."""
@@ -363,7 +363,7 @@ def check_ollama_connection() -> dict:
 
 def get_planner_llm() -> tuple:
     cfg = get_orchestrator_config()
-    planner = cfg.get("planner", {"provider": "gemini", "model": "gemini-2.5-flash"})
+    planner = cfg.get("planner", {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"})
     return planner["provider"], planner["model"]
 
 def get_executor_llm() -> tuple:
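
For context, a minimal self-contained sketch of the fallback pattern these hunks change, showing how the new OpenRouter/Qwen default resolves when the saved config has no planner entry. The `DEFAULT_PLANNER` constant and the explicit `cfg` parameter are illustrative only; the repo inlines the dict literal and reads the config via `get_orchestrator_config()`:

```python
# Hypothetical standalone sketch -- not the repo's actual module layout.
DEFAULT_PLANNER = {"provider": "openrouter", "model": "qwen/qwen-2.5-72b-instruct"}

def get_planner_llm(cfg: dict) -> tuple:
    # dict.get(key, default) falls back only when the key is missing,
    # so an explicit planner entry in the saved config always wins.
    planner = cfg.get("planner", DEFAULT_PLANNER)
    return planner["provider"], planner["model"]

print(get_planner_llm({}))
# -> ('openrouter', 'qwen/qwen-2.5-72b-instruct')
print(get_planner_llm({"planner": {"provider": "ollama", "model": "llama3.2:1b"}}))
# -> ('ollama', 'llama3.2:1b')
```

Note the asymmetry with the ai_agent.py hunk: there the fallback uses `or`, so a `cfg["model"]` that is present but falsy (None or an empty string) also falls through to the new Qwen default, not just a missing key.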