From 08bc354fa5f1483dff289b32f7af9afcca507e7c Mon Sep 17 00:00:00 2001 From: MacRimi Date: Sun, 22 Mar 2026 20:47:51 +0100 Subject: [PATCH] Update verified Gemini models and provider response handling --- AppImage/config/verified_ai_models.json | 8 ++-- .../scripts/ai_providers/gemini_provider.py | 44 ++++++++++++++++--- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/AppImage/config/verified_ai_models.json b/AppImage/config/verified_ai_models.json index f29e3841..e6d446cc 100644 --- a/AppImage/config/verified_ai_models.json +++ b/AppImage/config/verified_ai_models.json @@ -17,10 +17,12 @@ "gemini": { "models": [ - "gemini-2.5-flash-lite", - "gemini-flash-lite-latest" + "gemini-2.5-flash", + "gemini-2.5-pro", + "gemini-2.0-flash-lite" ], - "recommended": "gemini-2.5-flash-lite" + "recommended": "gemini-2.5-flash", + "_deprecated": ["gemini-2.0-flash", "gemini-1.0-pro", "gemini-pro"] }, "openai": { diff --git a/AppImage/scripts/ai_providers/gemini_provider.py b/AppImage/scripts/ai_providers/gemini_provider.py index 04b91d05..62a3e1c7 100644 --- a/AppImage/scripts/ai_providers/gemini_provider.py +++ b/AppImage/scripts/ai_providers/gemini_provider.py @@ -24,6 +24,13 @@ class GeminiProvider(AIProvider): 'learnlm', 'imagen', 'veo' ] + # Deprecated models that may still appear in API but return 404 + DEPRECATED_MODELS = [ + 'gemini-2.0-flash', + 'gemini-1.0-pro', + 'gemini-pro', + ] + def list_models(self) -> List[str]: """List available Gemini models that support generateContent. 
@@ -65,6 +72,10 @@ class GeminiProvider(AIProvider): if any(pattern in model_lower for pattern in self.EXCLUDED_PATTERNS): continue + # Exclude deprecated models that return 404 + if model_id in self.DEPRECATED_MODELS: + continue + models.append(model_id) # Sort with recommended models first (flash-lite, flash, pro) @@ -132,11 +143,32 @@ class GeminiProvider(AIProvider): try: # Gemini returns candidates array with content parts candidates = result.get('candidates', []) - if candidates: - content = candidates[0].get('content', {}) - parts = content.get('parts', []) - if parts: - return parts[0].get('text', '').strip() - raise AIProviderError("No content in response") + if not candidates: + # Check for blocked content or other issues + prompt_feedback = result.get('promptFeedback', {}) + block_reason = prompt_feedback.get('blockReason', '') + if block_reason: + raise AIProviderError(f"Content blocked by Gemini: {block_reason}") + raise AIProviderError("No candidates in response - model may be overloaded") + + # Check if response was blocked + finish_reason = candidates[0].get('finishReason', '') + if finish_reason == 'SAFETY': + safety_ratings = candidates[0].get('safetyRatings', []) + blocked_categories = [r.get('category', 'UNKNOWN') for r in safety_ratings + if r.get('blocked', False)] + raise AIProviderError(f"Response blocked by safety filter: {blocked_categories}") + + content = candidates[0].get('content', {}) + parts = content.get('parts', []) + if parts: + text = parts[0].get('text', '').strip() + if text: + return text + + # No text content - provide detailed error + raise AIProviderError(f"No text in response (finishReason: {finish_reason})") + except AIProviderError: + raise except (KeyError, IndexError) as e: raise AIProviderError(f"Unexpected response format: {e}")