feat: add Ollama local provider support
- Added Ollama (local) as AI provider option
- Configure VPS endpoint for Ollama connection
- Auto-detect available models from Ollama server
- Support for vision-capable models (llama3.2-vision, etc.)
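The empty models list in the provider entry below implies the client discovers installed models at runtime rather than hard-coding them. A minimal sketch of what that lookup could look like against a self-hosted Ollama server, using Ollama's standard GET /api/tags model-listing endpoint; the detectOllamaModels function and the shape of the endpoint value are illustrative assumptions, not code from this commit:

// Illustrative sketch (assumption, not from this commit): ask an Ollama
// server which models it has installed via GET /api/tags.
interface OllamaTagsResponse {
  models?: { name: string }[];
}

export async function detectOllamaModels(endpoint: string): Promise<string[]> {
  // endpoint is the configured VPS URL, e.g. 'http://my-vps:11434' (example value)
  const res = await fetch(`${endpoint.replace(/\/+$/, '')}/api/tags`);
  if (!res.ok) {
    throw new Error(`Ollama server responded with HTTP ${res.status}`);
  }
  const data = (await res.json()) as OllamaTagsResponse;
  return (data.models ?? []).map((m) => m.name);
}

The returned names could then populate the provider's models array before the model picker is rendered.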
@@ -1,4 +1,6 @@
-export type AIProvider = 'gemini' | 'openai' | 'anthropic' | 'azure';
+import { AIProvider } from './providers';
+
+export type AIProvider = 'gemini' | 'openai' | 'anthropic' | 'azure' | 'ollama';
 
 export interface ProviderConfig {
   id: AIProvider;
@@ -38,5 +40,13 @@ export const PROVIDERS: ProviderConfig[] = [
     models: ['gpt-4', 'gpt-4-32k', 'gpt-35-turbo'],
     requiresEndpoint: true,
     defaultModel: 'gpt-4'
   },
+  {
+    id: 'ollama',
+    name: 'Ollama (Local)',
+    description: 'LLMs rodando localmente na sua VPS',
+    models: [],
+    requiresEndpoint: true,
+    defaultModel: 'llama3.2-vision'
+  }
 ];
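Because the ollama entry ships with models: [] and a vision-capable defaultModel, the runtime-detected list presumably still needs a sensible default. A hedged sketch of one way to prefer a vision-capable model; the pickDefaultModel helper and its name-based heuristic are assumptions for illustration, not part of this commit:

// Illustrative heuristic (assumption, not from this commit): prefer a
// vision-capable model from the detected list, else the first detected
// model, else the provider's declared default.
export function pickDefaultModel(
  detected: string[],
  fallback = 'llama3.2-vision'
): string {
  const vision = detected.find(
    (name) => name.includes('vision') || name.includes('llava')
  );
  return vision ?? detected[0] ?? fallback;
}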