feat: add Ollama local provider support

- Added Ollama (local) as AI provider option
- Configure VPS endpoint for Ollama connection
- Auto-detect available models from Ollama server
- Support for vision-capable models (llama3.2-vision, etc.)
2026-04-04 19:46:14 +00:00
parent 97eb42c243
commit a395f0d696
5 changed files with 213 additions and 77 deletions

@@ -347,7 +347,67 @@ export const analyzeCertificate = async (options: AnalyzeOptions): Promise<ReportData>
      return analyzeWithAnthropic(file, apiKey, model);
    case 'azure':
      return analyzeWithAzure(file, apiKey, endpoint!, model);
    case 'ollama':
      return analyzeWithOllama(file, endpoint!, model);
    default:
      throw new Error(`Provedor não suportado: ${provider}`);
  }
};

export const analyzeWithOllama = async (file: File, endpoint: string, model: string = 'llama3.2-vision'): Promise<ReportData> => {
  if (!endpoint) {
    throw new Error("O endpoint do Ollama é necessário. Configure o endereço da sua VPS.");
  }

  // Read the file as a data URL and keep only the base64 payload.
  const base64Data = await new Promise<string>((resolve, reject) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve((reader.result as string).split(',')[1]);
    reader.onerror = () => reject(reader.error);
    reader.readAsDataURL(file);
  });

  // Normalize the configured endpoint (drop a trailing slash) before appending the API path.
  const url = `${endpoint.replace(/\/+$/, '')}/api/chat`;

  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model,
      // Ollama's /api/chat expects `content` as a string and base64 images
      // in a sibling `images` array on the message itself.
      messages: [
        {
          role: 'user',
          content: PROMPT_BASE + "\n\nRetorne apenas JSON válido sem formatação markdown.",
          images: [base64Data]
        }
      ],
      format: 'json',
      // Disable streaming so the response is a single JSON body we can parse below.
      stream: false,
      options: {
        temperature: 0.1,
        num_predict: 4096
      }
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Erro do Ollama: ${error}`);
  }

  const data = await response.json();
  const content = data.message?.content;

  if (!content) {
    throw new Error("Resposta vazia do Ollama");
  }

  return cleanAndParseJson(content) as ReportData;
};
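
The hunk above only covers the new analyzeWithOllama call path. The commit message also mentions auto-detecting available models from the Ollama server, which presumably lives in one of the other changed files not shown here. A minimal sketch of how that detection could look, assuming Ollama's GET /api/tags endpoint; the listOllamaModels / pickVisionModel names and the vision-model filter are illustrative assumptions, not taken from this diff:

// Hypothetical helper (not part of this hunk): list installed models via GET /api/tags,
// which returns a body shaped like { models: [{ name: "llama3.2-vision:latest", ... }] }.
export const listOllamaModels = async (endpoint: string): Promise<string[]> => {
  const response = await fetch(`${endpoint.replace(/\/+$/, '')}/api/tags`);
  if (!response.ok) {
    throw new Error(`Erro do Ollama: ${await response.text()}`);
  }
  const data = await response.json();
  return (data.models ?? []).map((m: { name: string }) => m.name);
};

// Assumed heuristic for pre-selecting a vision-capable model in the UI.
export const pickVisionModel = (models: string[]): string | undefined =>
  models.find((name) => /vision|llava/i.test(name));

A caller could populate the model dropdown with listOllamaModels(endpoint) and default the selection to pickVisionModel(models), falling back to 'llama3.2-vision' when nothing matches.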