feat: add Ollama local provider support

- Add Ollama (local) as an AI provider option
- Configure a VPS endpoint for the Ollama connection
- Auto-detect available models from the Ollama server (see the sketch below)
- Support vision-capable models (llama3.2-vision, etc.)
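To illustrate the model auto-detection item above: a minimal sketch of how a client could ask a local Ollama server which models it has installed. Ollama lists them on `GET /api/tags`; the `listOllamaModels` helper name and the example VPS URL are illustrative and not part of this commit.

```ts
// Illustrative helper (not part of this commit): query an Ollama server
// for its installed models so the UI can offer them as options.
interface OllamaTagsResponse {
  models: { name: string }[];
}

export async function listOllamaModels(endpoint: string): Promise<string[]> {
  // Ollama lists locally installed models on GET /api/tags.
  const res = await fetch(`${endpoint.replace(/\/+$/, '')}/api/tags`);
  if (!res.ok) {
    throw new Error(`Ollama server returned ${res.status}`);
  }
  const data = (await res.json()) as OllamaTagsResponse;
  return data.models.map((m) => m.name);
}

// Example (hypothetical endpoint):
// listOllamaModels('http://my-vps:11434').then(console.log);
```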
2026-04-04 19:46:14 +00:00
parent 97eb42c243
commit a395f0d696
5 changed files with 213 additions and 77 deletions


@@ -1,4 +1,6 @@
-export type AIProvider = 'gemini' | 'openai' | 'anthropic' | 'azure';
+export type AIProvider = 'gemini' | 'openai' | 'anthropic' | 'azure' | 'ollama';
 
 export interface ProviderConfig {
   id: AIProvider;
@@ -38,5 +40,13 @@ export const PROVIDERS: ProviderConfig[] = [
     models: ['gpt-4', 'gpt-4-32k', 'gpt-35-turbo'],
     requiresEndpoint: true,
     defaultModel: 'gpt-4'
-  }
+  },
+  {
+    id: 'ollama',
+    name: 'Ollama (Local)',
+    description: 'LLMs running locally on your VPS',
+    models: [],
+    requiresEndpoint: true,
+    defaultModel: 'llama3.2-vision'
+  }
 ];
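The new entry defaults to `llama3.2-vision` and ships with an empty `models` list, so the configured endpoint is presumably expected to serve image-capable models. Below is a rough sketch of a vision request against Ollama's chat API (`POST /api/chat` with base64-encoded `images`); the `describeImage` helper is hypothetical, and only the Ollama route and request shape are assumed.

```ts
// Illustrative only: send a prompt plus an image to a vision-capable
// model running on the configured Ollama endpoint.
export async function describeImage(
  endpoint: string,
  imageBase64: string,
  model = 'llama3.2-vision'
): Promise<string> {
  const res = await fetch(`${endpoint.replace(/\/+$/, '')}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model,
      stream: false, // return one JSON response instead of a stream
      messages: [
        { role: 'user', content: 'Describe this image.', images: [imageBase64] },
      ],
    }),
  });
  if (!res.ok) {
    throw new Error(`Ollama server returned ${res.status}`);
  }
  const data = (await res.json()) as { message: { content: string } };
  return data.message.content;
}
```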