feat: add Ollama local provider support

- Added Ollama (local) as AI provider option
- Configure VPS endpoint for Ollama connection
- Auto-detect available models from Ollama server
- Support for vision-capable models (llama3.2-vision, etc.)
This commit is contained in:
2026-04-04 19:46:14 +00:00
parent 97eb42c243
commit a395f0d696
5 changed files with 213 additions and 77 deletions

View File

@@ -22,6 +22,8 @@ export const testApiKey = async (provider: AIProvider, apiKey: string, endpoint?
return await testAnthropic(apiKey);
case 'azure':
return await testAzure(apiKey, endpoint);
case 'ollama':
return await testOllama(endpoint);
default:
return { success: false, error: 'Provedor não suportado' };
}
@@ -33,6 +35,44 @@ export const testApiKey = async (provider: AIProvider, apiKey: string, endpoint?
}
};
/**
 * Tests connectivity to a local Ollama server and discovers its models.
 *
 * Queries the Ollama `/api/tags` endpoint to list installed models, prefers
 * models whose names suggest vision support, and otherwise returns the full
 * list (or a default entry when the server reports no models).
 *
 * @param endpoint Base URL of the Ollama server (e.g. http://192.168.1.100:11434).
 * @returns Success with a model list, or failure with a user-facing (pt-BR) error message.
 */
const testOllama = async (endpoint?: string): Promise<TestResult> => {
  if (!endpoint) {
    return { success: false, error: 'Endereço do Ollama é obrigatório (ex: http://192.168.1.100:11434)' };
  }
  // Normalize trailing slashes so "http://host:11434/" does not produce a
  // double-slash request URL ("...//api/tags"), which some servers reject.
  const baseUrl = endpoint.replace(/\/+$/, '');
  try {
    const response = await fetch(`${baseUrl}/api/tags`);
    if (!response.ok) {
      return { success: false, error: 'Não foi possível conectar ao Ollama. Verifique o endereço.' };
    }
    const data = await response.json();
    // Ollama responds with { models: [{ name, ... }] }; map to the app's ModelInfo shape.
    const models: ModelInfo[] = (data.models ?? []).map((m: { name: string }) => ({
      id: m.name,
      name: m.name
    }));
    // Prefer models whose names suggest vision capability.
    // NOTE(review): 'llama3'/'qwen2' also match text-only variants (e.g. llama3.1);
    // kept as-is to preserve the existing selection behavior — confirm intent.
    const visionModels = models.filter((m) =>
      m.id.includes('vision') ||
      m.id.includes('llama3') ||
      m.id.includes('qwen2') ||
      m.id.includes('moondream')
    );
    if (visionModels.length > 0) {
      return { success: true, models: visionModels };
    }
    return {
      success: true,
      models: models.length > 0 ? models : [{ id: 'llama3.2', name: 'Llama 3.2 (Padrão)' }]
    };
  } catch {
    // Network failure, DNS error, or refused connection — server not reachable.
    return { success: false, error: 'Não foi possível conectar ao Ollama. Verifique o endereço e certifique-se que o Ollama está rodando.' };
  }
};
const testGemini = async (apiKey: string): Promise<TestResult> => {
const { GoogleGenAI } = await import('@google/genai');