Update notification-settings.tsx

This commit is contained in:
MacRimi
2026-03-19 19:36:39 +01:00
parent 1949aeb10f
commit 9484f78fb6
+11 -12
View File
@@ -605,12 +605,15 @@ export function NotificationSettings() {
       method: "POST",
       body: JSON.stringify({ ollama_url: url }),
     })
-    if (data.success) {
+    if (data.success && data.models && data.models.length > 0) {
       setOllamaModels(data.models)
-      // If current model not in list and there are models available, select first one
-      if (data.models.length > 0 && !data.models.includes(config.ai_model)) {
-        updateConfig(p => ({ ...p, ai_model: data.models[0] }))
-      }
+      // Auto-select first model if current selection is empty or not in the list
+      updateConfig(prev => {
+        if (!prev.ai_model || !data.models.includes(prev.ai_model)) {
+          return { ...prev, ai_model: data.models[0] }
+        }
+        return prev
+      })
     } else {
       setOllamaModels([])
     }
@@ -619,14 +622,10 @@ export function NotificationSettings() {
     } finally {
       setLoadingOllamaModels(false)
     }
-  }, [config.ai_model])
+  }, [])

-  // Fetch Ollama models when provider is ollama and URL changes
-  useEffect(() => {
-    if (config.ai_provider === 'ollama' && config.ai_ollama_url) {
-      fetchOllamaModels(config.ai_ollama_url)
-    }
-  }, [config.ai_provider, config.ai_ollama_url, fetchOllamaModels])
+  // Note: We removed the automatic useEffect that fetched models on URL change
+  // because it caused infinite loops. Users now use the "Load" button explicitly.

   const handleTestAI = async () => {
     setTestingAI(true)