Update notification service

This commit is contained in:
MacRimi
2026-03-17 14:07:47 +01:00
parent 9a51a9e635
commit 0cb8900374
14 changed files with 1390 additions and 112 deletions

View File

@@ -8,6 +8,7 @@ import { Label } from "./ui/label"
import { Badge } from "./ui/badge"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
import { Dialog, DialogContent, DialogHeader, DialogTitle } from "./ui/dialog"
import { fetchApi } from "../lib/api-config"
import {
Bell, BellOff, Send, CheckCircle2, XCircle, Loader2,
@@ -57,6 +58,9 @@ interface NotificationConfig {
ai_provider: string
ai_api_key: string
ai_model: string
ai_language: string
ai_ollama_url: string
channel_ai_detail: Record<string, string>
hostname: string
webhook_secret: string
webhook_allowed_ips: string
@@ -102,8 +106,63 @@ const EVENT_CATEGORIES = [
const CHANNEL_TYPES = ["telegram", "gotify", "discord", "email"] as const
const AI_PROVIDERS = [
{ value: "openai", label: "OpenAI" },
{ value: "groq", label: "Groq" },
{
value: "groq",
label: "Groq",
model: "llama-3.3-70b-versatile",
description: "Very fast, generous free tier (30 req/min). Ideal to start."
},
{
value: "openai",
label: "OpenAI",
model: "gpt-4o-mini",
description: "Industry standard. Very accurate and widely used."
},
{
value: "anthropic",
label: "Anthropic (Claude)",
model: "claude-3-haiku-20240307",
description: "Excellent for writing and translation. Fast and economical."
},
{
value: "gemini",
label: "Google Gemini",
model: "gemini-1.5-flash",
description: "Free tier available, great quality/price ratio."
},
{
value: "ollama",
label: "Ollama (Local)",
model: "llama3.2",
description: "100% local execution. No costs, total privacy, no internet required."
},
{
value: "openrouter",
label: "OpenRouter",
model: "meta-llama/llama-3.3-70b-instruct",
description: "Aggregator with access to 100+ models using a single API key. Maximum flexibility."
},
]
const AI_LANGUAGES = [
{ value: "en", label: "English" },
{ value: "es", label: "Espanol" },
{ value: "fr", label: "Francais" },
{ value: "de", label: "Deutsch" },
{ value: "pt", label: "Portugues" },
{ value: "it", label: "Italiano" },
{ value: "ru", label: "Russkiy" },
{ value: "sv", label: "Svenska" },
{ value: "no", label: "Norsk" },
{ value: "ja", label: "Nihongo" },
{ value: "zh", label: "Zhongwen" },
{ value: "nl", label: "Nederlands" },
]
const AI_DETAIL_LEVELS = [
{ value: "brief", label: "Brief", desc: "2-3 lines, essential only" },
{ value: "standard", label: "Standard", desc: "Concise with basic context" },
{ value: "detailed", label: "Detailed", desc: "Complete technical details" },
]
const DEFAULT_CONFIG: NotificationConfig = {
@@ -128,9 +187,17 @@ const DEFAULT_CONFIG: NotificationConfig = {
email: { categories: {}, events: {} },
},
ai_enabled: false,
ai_provider: "openai",
ai_provider: "groq",
ai_api_key: "",
ai_model: "",
ai_language: "en",
ai_ollama_url: "http://localhost:11434",
channel_ai_detail: {
telegram: "brief",
gotify: "brief",
discord: "brief",
email: "detailed",
},
hostname: "",
webhook_secret: "",
webhook_allowed_ips: "",
@@ -155,6 +222,9 @@ export function NotificationSettings() {
const [hasChanges, setHasChanges] = useState(false)
const [expandedCategories, setExpandedCategories] = useState<Set<string>>(new Set())
const [originalConfig, setOriginalConfig] = useState<NotificationConfig>(DEFAULT_CONFIG)
const [showProviderInfo, setShowProviderInfo] = useState(false)
const [testingAI, setTestingAI] = useState(false)
const [aiTestResult, setAiTestResult] = useState<{ success: boolean; message: string; model?: string } | null>(null)
const [webhookSetup, setWebhookSetup] = useState<{
status: "idle" | "running" | "success" | "failed"
fallback_commands: string[]
@@ -373,6 +443,8 @@ export function NotificationSettings() {
ai_provider: cfg.ai_provider,
ai_api_key: cfg.ai_api_key,
ai_model: cfg.ai_model,
ai_language: cfg.ai_language,
ai_ollama_url: cfg.ai_ollama_url,
hostname: cfg.hostname,
webhook_secret: cfg.webhook_secret,
webhook_allowed_ips: cfg.webhook_allowed_ips,
@@ -402,6 +474,12 @@ export function NotificationSettings() {
}
}
}
// Per-channel AI detail level
if (cfg.channel_ai_detail) {
for (const [chName, level] of Object.entries(cfg.channel_ai_detail)) {
flat[`${chName}.ai_detail_level`] = level
}
}
return flat
}
@@ -493,6 +571,28 @@ export function NotificationSettings() {
}
}
const handleTestAI = async () => {
setTestingAI(true)
setAiTestResult(null)
try {
const data = await fetchApi<{ success: boolean; message: string; model: string }>("/api/notifications/test-ai", {
method: "POST",
body: JSON.stringify({
provider: config.ai_provider,
api_key: config.ai_api_key,
model: config.ai_model,
ollama_url: config.ai_ollama_url,
}),
})
setAiTestResult(data)
} catch (err) {
setAiTestResult({ success: false, message: String(err) })
} finally {
setTestingAI(false)
setTimeout(() => setAiTestResult(null), 8000)
}
}
const handleClearHistory = async () => {
try {
await fetchApi("/api/notifications/history", { method: "DELETE" })
@@ -1228,8 +1328,17 @@ export function NotificationSettings() {
{config.ai_enabled && (
<>
{/* Provider + Info button */}
<div className="space-y-1.5">
<Label className="text-[11px] text-muted-foreground">Provider</Label>
<div className="flex items-center gap-2">
<Label className="text-[11px] text-muted-foreground">Provider</Label>
<button
onClick={() => setShowProviderInfo(true)}
className="text-[10px] text-blue-400 hover:text-blue-300 transition-colors"
>
+info
</button>
</div>
<Select
value={config.ai_provider}
onValueChange={v => updateConfig(p => ({ ...p, ai_provider: v }))}
@@ -1245,39 +1354,143 @@ export function NotificationSettings() {
</SelectContent>
</Select>
</div>
<div className="space-y-1.5">
<Label className="text-[11px] text-muted-foreground">API Key</Label>
<div className="flex items-center gap-1.5">
{/* Ollama URL (conditional) */}
{config.ai_provider === "ollama" && (
<div className="space-y-1.5">
<Label className="text-[11px] text-muted-foreground">Ollama URL</Label>
<Input
type={showSecrets["ai_key"] ? "text" : "password"}
className="h-7 text-xs font-mono"
placeholder="sk-..."
value={config.ai_api_key}
onChange={e => updateConfig(p => ({ ...p, ai_api_key: e.target.value }))}
placeholder="http://localhost:11434"
value={config.ai_ollama_url}
onChange={e => updateConfig(p => ({ ...p, ai_ollama_url: e.target.value }))}
disabled={!editMode}
/>
<button
className="h-7 w-7 flex items-center justify-center rounded-md border border-border hover:bg-muted transition-colors shrink-0"
onClick={() => toggleSecret("ai_key")}
>
{showSecrets["ai_key"] ? <EyeOff className="h-3 w-3" /> : <Eye className="h-3 w-3" />}
</button>
</div>
</div>
)}
{/* API Key (not shown for Ollama) */}
{config.ai_provider !== "ollama" && (
<div className="space-y-1.5">
<Label className="text-[11px] text-muted-foreground">API Key</Label>
<div className="flex items-center gap-1.5">
<Input
type={showSecrets["ai_key"] ? "text" : "password"}
className="h-7 text-xs font-mono"
placeholder="sk-..."
value={config.ai_api_key}
onChange={e => updateConfig(p => ({ ...p, ai_api_key: e.target.value }))}
disabled={!editMode}
/>
<button
className="h-7 w-7 flex items-center justify-center rounded-md border border-border hover:bg-muted transition-colors shrink-0"
onClick={() => toggleSecret("ai_key")}
>
{showSecrets["ai_key"] ? <EyeOff className="h-3 w-3" /> : <Eye className="h-3 w-3" />}
</button>
</div>
</div>
)}
{/* Model (optional) */}
<div className="space-y-1.5">
<Label className="text-[11px] text-muted-foreground">Model (optional)</Label>
<Input
className="h-7 text-xs font-mono"
placeholder={config.ai_provider === "openai" ? "gpt-4o-mini" : "llama-3.3-70b-versatile"}
placeholder={AI_PROVIDERS.find(p => p.value === config.ai_provider)?.model || ""}
value={config.ai_model}
onChange={e => updateConfig(p => ({ ...p, ai_model: e.target.value }))}
disabled={!editMode}
/>
</div>
{/* Language selector */}
<div className="space-y-1.5">
<Label className="text-[11px] text-muted-foreground">Language</Label>
<Select
value={config.ai_language}
onValueChange={v => updateConfig(p => ({ ...p, ai_language: v }))}
disabled={!editMode}
>
<SelectTrigger className="h-7 text-xs">
<SelectValue />
</SelectTrigger>
<SelectContent>
{AI_LANGUAGES.map(l => (
<SelectItem key={l.value} value={l.value}>{l.label}</SelectItem>
))}
</SelectContent>
</Select>
</div>
{/* Test Connection button */}
<button
onClick={handleTestAI}
disabled={!editMode || testingAI || (config.ai_provider !== "ollama" && !config.ai_api_key)}
className="w-full h-7 flex items-center justify-center gap-1.5 rounded-md text-xs font-medium bg-purple-600 hover:bg-purple-700 text-white disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
>
{testingAI ? (
<><Loader2 className="h-3 w-3 animate-spin" /> Testing...</>
) : (
<><Zap className="h-3 w-3" /> Test Connection</>
)}
</button>
{/* Test result */}
{aiTestResult && (
<div className={`flex items-start gap-2 p-2 rounded-md ${
aiTestResult.success
? "bg-green-500/10 border border-green-500/20"
: "bg-red-500/10 border border-red-500/20"
}`}>
{aiTestResult.success
? <CheckCircle2 className="h-3.5 w-3.5 text-green-400 shrink-0 mt-0.5" />
: <XCircle className="h-3.5 w-3.5 text-red-400 shrink-0 mt-0.5" />
}
<p className={`text-[10px] leading-relaxed ${
aiTestResult.success ? "text-green-400/90" : "text-red-400/90"
}`}>
{aiTestResult.message}
{aiTestResult.model && ` (${aiTestResult.model})`}
</p>
</div>
)}
{/* Per-channel detail level */}
<div className="space-y-2 pt-2 border-t border-border/50">
<Label className="text-[11px] text-muted-foreground">Detail Level per Channel</Label>
<div className="grid grid-cols-2 gap-2">
{CHANNEL_TYPES.map(ch => (
<div key={ch} className="flex items-center justify-between gap-2 px-2 py-1 rounded bg-muted/30">
<span className="text-[10px] text-muted-foreground capitalize">{ch}</span>
<Select
value={config.channel_ai_detail?.[ch] || "standard"}
onValueChange={v => updateConfig(p => ({
...p,
channel_ai_detail: { ...p.channel_ai_detail, [ch]: v }
}))}
disabled={!editMode}
>
<SelectTrigger className="h-5 w-[80px] text-[10px] px-1.5">
<SelectValue />
</SelectTrigger>
<SelectContent>
{AI_DETAIL_LEVELS.map(l => (
<SelectItem key={l.value} value={l.value} className="text-[10px]">
{l.label}
</SelectItem>
))}
</SelectContent>
</Select>
</div>
))}
</div>
</div>
<div className="flex items-start gap-2 p-2 rounded-md bg-purple-500/10 border border-purple-500/20">
<Info className="h-3.5 w-3.5 text-purple-400 shrink-0 mt-0.5" />
<p className="text-[10px] text-purple-400/90 leading-relaxed">
AI enhancement is optional. When enabled, notifications include contextual analysis and recommended actions. If the AI service is unavailable, standard templates are used as fallback.
AI enhancement translates and formats notifications to your selected language. Each channel can have different detail levels. If the AI service is unavailable, standard templates are used as fallback.
</p>
</div>
</>
@@ -1301,5 +1514,35 @@ export function NotificationSettings() {
</div>
</CardContent>
</Card>
{/* AI Provider Information Modal */}
<Dialog open={showProviderInfo} onOpenChange={setShowProviderInfo}>
<DialogContent className="max-w-md">
<DialogHeader>
<DialogTitle className="text-base">AI Providers Information</DialogTitle>
</DialogHeader>
<div className="space-y-3 max-h-[60vh] overflow-y-auto pr-1">
{AI_PROVIDERS.map(provider => (
<div
key={provider.value}
className="p-3 rounded-lg bg-muted/50 border border-border hover:border-muted-foreground/40 transition-colors"
>
<div className="flex items-center justify-between">
<span className="font-medium text-sm">{provider.label}</span>
{provider.value === "ollama" && (
<Badge variant="outline" className="text-[9px] px-1.5 py-0">Local</Badge>
)}
</div>
<div className="text-[11px] text-muted-foreground mt-1">
Default model: <code className="text-[10px] bg-muted px-1 py-0.5 rounded font-mono">{provider.model}</code>
</div>
<p className="text-[11px] text-muted-foreground mt-2 leading-relaxed">
{provider.description}
</p>
</div>
))}
</div>
</DialogContent>
</Dialog>
)
}

View File

@@ -0,0 +1,111 @@
"""AI Providers for ProxMenux notification enhancement.
This module provides a pluggable architecture for different AI providers
to enhance and translate notification messages.
Supported providers:
- Groq: Fast inference, generous free tier (30 req/min)
- OpenAI: Industry standard, widely used
- Anthropic: Excellent for text generation, Claude Haiku is fast and affordable
- Gemini: Google's model, free tier available, good quality/price ratio
- Ollama: 100% local execution, no costs, complete privacy
- OpenRouter: Aggregator with access to 100+ models using a single API key
"""
from typing import Optional

from .base import AIProvider, AIProviderError
from .groq_provider import GroqProvider
from .openai_provider import OpenAIProvider
from .anthropic_provider import AnthropicProvider
from .gemini_provider import GeminiProvider
from .ollama_provider import OllamaProvider
from .openrouter_provider import OpenRouterProvider
# Registry of canonical provider name -> implementation class.
# Keys are the lowercase names accepted by get_provider().
PROVIDERS = {
    'groq': GroqProvider,
    'openai': OpenAIProvider,
    'anthropic': AnthropicProvider,
    'gemini': GeminiProvider,
    'ollama': OllamaProvider,
    'openrouter': OpenRouterProvider,
}

# Provider metadata for UI display
# NOTE(review): 'default_model' presumably mirrors each provider class's
# DEFAULT_MODEL constant — keep the two in sync when bumping models (verify).
PROVIDER_INFO = {
    'groq': {
        'name': 'Groq',
        'default_model': 'llama-3.3-70b-versatile',
        'description': 'Fast inference, generous free tier (30 req/min). Ideal to get started.',
        'requires_api_key': True,
    },
    'openai': {
        'name': 'OpenAI',
        'default_model': 'gpt-4o-mini',
        'description': 'Industry standard. Very accurate and widely used.',
        'requires_api_key': True,
    },
    'anthropic': {
        'name': 'Anthropic (Claude)',
        'default_model': 'claude-3-haiku-20240307',
        'description': 'Excellent for writing and translation. Fast and affordable.',
        'requires_api_key': True,
    },
    'gemini': {
        'name': 'Google Gemini',
        'default_model': 'gemini-1.5-flash',
        'description': 'Free tier available, very good quality/price ratio.',
        'requires_api_key': True,
    },
    # Ollama runs locally, hence no API key requirement.
    'ollama': {
        'name': 'Ollama (Local)',
        'default_model': 'llama3.2',
        'description': '100% local execution. No costs, complete privacy, no internet required.',
        'requires_api_key': False,
    },
    'openrouter': {
        'name': 'OpenRouter',
        'default_model': 'meta-llama/llama-3.3-70b-instruct',
        'description': 'Aggregator with access to 100+ models using a single API key. Maximum flexibility.',
        'requires_api_key': True,
    },
}
def get_provider(name: str, **kwargs) -> AIProvider:
    """Factory function to get provider instance.

    Args:
        name: Provider name (groq, openai, anthropic, gemini, ollama, openrouter).
            Lookup is case-insensitive and surrounding whitespace is ignored.
        **kwargs: Provider-specific arguments (api_key, model, base_url)

    Returns:
        AIProvider instance

    Raises:
        AIProviderError: If provider name is unknown
    """
    # Normalize so UI/API callers may send e.g. "Groq" or " ollama " and
    # still match the canonical lowercase registry keys.
    key = (name or "").strip().lower()
    if key not in PROVIDERS:
        raise AIProviderError(f"Unknown provider: {name}. Available: {list(PROVIDERS.keys())}")
    return PROVIDERS[key](**kwargs)
def get_provider_info(name: Optional[str] = None) -> dict:
    """Get provider metadata for UI display.

    Args:
        name: Optional provider name. If None, returns all providers info.

    Returns:
        Info dict for a single provider (empty dict when the name is
        unknown), or the mapping of all providers when name is None.
    """
    if name:
        return PROVIDER_INFO.get(name, {})
    return PROVIDER_INFO
# Public API of the ai_providers package.
__all__ = [
    'AIProvider',
    'AIProviderError',
    'PROVIDERS',
    'PROVIDER_INFO',
    'get_provider',
    'get_provider_info',
]

View File

@@ -0,0 +1,65 @@
"""Anthropic (Claude) provider implementation.
Anthropic's Claude models are excellent for text generation and translation.
Claude Haiku is particularly fast and affordable for notification enhancement.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class AnthropicProvider(AIProvider):
    """Anthropic provider using their Messages API."""

    NAME = "anthropic"
    DEFAULT_MODEL = "claude-3-haiku-20240307"
    REQUIRES_API_KEY = True
    API_URL = "https://api.anthropic.com/v1/messages"
    API_VERSION = "2023-06-01"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Anthropic's API.

        Unlike OpenAI-compatible APIs, Anthropic takes the system prompt as
        a top-level 'system' field rather than a message with role 'system'.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Anthropic")

        request_body = {
            'model': self.model,
            'system': system_prompt,
            'messages': [{'role': 'user', 'content': user_message}],
            'max_tokens': max_tokens,
        }
        request_headers = {
            'Content-Type': 'application/json',
            'x-api-key': self.api_key,
            'anthropic-version': self.API_VERSION,
        }
        response = self._make_request(self.API_URL, request_body, request_headers)
        try:
            # The reply arrives as a list of content blocks; the text lives
            # in the first block's 'text' field.
            blocks = response['content']
            if isinstance(blocks, list) and blocks:
                return blocks[0].get('text', '').strip()
            return str(blocks).strip()
        except (KeyError, IndexError) as exc:
            raise AIProviderError(f"Unexpected response format: {exc}")

View File

@@ -0,0 +1,141 @@
"""Base class for AI providers."""
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
class AIProviderError(Exception):
    """Raised when an AI provider request, response, or configuration fails."""
class AIProvider(ABC):
    """Abstract base class for AI providers.

    All provider implementations must inherit from this class and implement
    the generate() method. Shared plumbing (connection test, raw HTTP POST)
    lives here so each provider only builds its own payload and headers.
    """

    # Provider metadata (override in subclasses)
    NAME = "base"
    DEFAULT_MODEL = ""
    REQUIRES_API_KEY = True

    def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
        """Initialize the AI provider.

        Args:
            api_key: API key for authentication (not required for local providers)
            model: Model name to use (defaults to DEFAULT_MODEL if empty)
            base_url: Base URL for API calls (used by Ollama and custom endpoints)
        """
        self.api_key = api_key
        self.model = model or self.DEFAULT_MODEL
        self.base_url = base_url

    @abstractmethod
    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response from the AI model.

        Args:
            system_prompt: System instructions for the model
            user_message: User message/query to process
            max_tokens: Maximum tokens in the response

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If there's an error communicating with the provider
        """

    def test_connection(self) -> Dict[str, Any]:
        """Test the connection to the AI provider.

        Sends a simple test message to verify the provider is accessible
        and the API key is valid.

        Returns:
            Dictionary with:
                - success: bool indicating if connection succeeded
                - message: Human-readable status message
                - model: Model name being used
        """
        try:
            response = self.generate(
                system_prompt="You are a test assistant. Respond with exactly: CONNECTION_OK",
                user_message="Test connection",
                max_tokens=20
            )
            if not response:
                return {
                    'success': False,
                    'message': 'No response received from provider',
                    'model': self.model
                }
            # "CONNECTION_OK" contains "CONNECTION", so a single substring
            # check covers both forms the model might echo back.
            if "CONNECTION" in response.upper():
                return {
                    'success': True,
                    'message': 'Connection successful',
                    'model': self.model
                }
            # Any non-empty reply still proves endpoint and credentials work.
            return {
                'success': True,
                'message': 'Connected (response received)',
                'model': self.model
            }
        except AIProviderError as e:
            return {
                'success': False,
                'message': str(e),
                'model': self.model
            }
        except Exception as e:
            return {
                'success': False,
                'message': f'Unexpected error: {str(e)}',
                'model': self.model
            }

    def _make_request(self, url: str, payload: dict, headers: dict,
                      timeout: int = 15) -> dict:
        """Make an HTTP POST request to an AI provider API.

        Args:
            url: API endpoint URL
            payload: JSON payload to send
            headers: HTTP headers
            timeout: Request timeout in seconds

        Returns:
            Parsed JSON response

        Raises:
            AIProviderError: If request fails
        """
        # Imported lazily so merely loading this module stays cheap.
        import json
        import urllib.request
        import urllib.error

        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(url, data=data, headers=headers, method='POST')
        try:
            with urllib.request.urlopen(req, timeout=timeout) as resp:
                return json.loads(resp.read().decode('utf-8'))
        except urllib.error.HTTPError as e:
            # Surface the server's error body when available — it usually
            # explains why the key or model was rejected.
            error_body = ""
            try:
                error_body = e.read().decode('utf-8')
            except Exception:
                pass
            raise AIProviderError(f"HTTP {e.code}: {error_body or e.reason}")
        except urllib.error.URLError as e:
            raise AIProviderError(f"Connection error: {e.reason}")
        except json.JSONDecodeError as e:
            raise AIProviderError(f"Invalid JSON response: {e}")
        except Exception as e:
            raise AIProviderError(f"Request failed: {str(e)}")

View File

@@ -0,0 +1,74 @@
"""Google Gemini provider implementation.
Google's Gemini models offer a free tier and excellent quality/price ratio.
Gemini 1.5 Flash is particularly fast and cost-effective.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class GeminiProvider(AIProvider):
    """Google Gemini provider using the Generative Language API."""

    NAME = "gemini"
    DEFAULT_MODEL = "gemini-1.5-flash"
    REQUIRES_API_KEY = True
    API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Google's Gemini API.

        Note: Gemini uses a different API format. System instructions
        go in a separate systemInstruction field.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Gemini")

        url = f"{self.API_BASE}/{self.model}:generateContent"
        # Gemini uses a specific format with a contents array.
        payload = {
            'systemInstruction': {
                'parts': [{'text': system_prompt}]
            },
            'contents': [
                {
                    'role': 'user',
                    'parts': [{'text': user_message}]
                }
            ],
            'generationConfig': {
                'maxOutputTokens': max_tokens,
                'temperature': 0.3,
            }
        }
        headers = {
            'Content-Type': 'application/json',
            # Send the key as the documented x-goog-api-key header instead of
            # a ?key= query parameter, so it cannot leak into access logs,
            # proxies, or the URL echoed in error messages.
            'x-goog-api-key': self.api_key,
        }
        result = self._make_request(url, payload, headers)
        try:
            # Gemini returns a candidates array with content parts.
            candidates = result.get('candidates', [])
            if candidates:
                content = candidates[0].get('content', {})
                parts = content.get('parts', [])
                if parts:
                    return parts[0].get('text', '').strip()
            raise AIProviderError("No content in response")
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")

View File

@@ -0,0 +1,56 @@
"""Groq AI provider implementation.
Groq provides fast inference with a generous free tier (30 requests/minute).
Uses the OpenAI-compatible API format.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class GroqProvider(AIProvider):
    """Groq AI provider using their OpenAI-compatible API."""

    NAME = "groq"
    DEFAULT_MODEL = "llama-3.3-70b-versatile"
    REQUIRES_API_KEY = True
    API_URL = "https://api.groq.com/openai/v1/chat/completions"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Groq's API.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Groq")

        conversation = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': user_message},
        ]
        request_body = {
            'model': self.model,
            'messages': conversation,
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        auth_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }
        reply = self._make_request(self.API_URL, request_body, auth_headers)
        try:
            first_choice = reply['choices'][0]
            return first_choice['message']['content'].strip()
        except (KeyError, IndexError) as exc:
            raise AIProviderError(f"Unexpected response format: {exc}")

View File

@@ -0,0 +1,118 @@
"""Ollama provider implementation.
Ollama enables 100% local AI execution with no costs and complete privacy.
No internet connection required - perfect for sensitive enterprise environments.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OllamaProvider(AIProvider):
    """Ollama provider for local AI execution."""

    NAME = "ollama"
    DEFAULT_MODEL = "llama3.2"
    REQUIRES_API_KEY = False
    DEFAULT_URL = "http://localhost:11434"

    def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
        """Initialize Ollama provider.

        Args:
            api_key: Not used for Ollama (local execution)
            model: Model name (default: llama3.2)
            base_url: Ollama server URL (default: http://localhost:11434)
        """
        super().__init__(api_key, model, base_url)
        # Use default URL if not provided
        if not self.base_url:
            self.base_url = self.DEFAULT_URL

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using local Ollama server.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length (maps to num_predict)

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If Ollama server is unreachable
        """
        url = f"{self.base_url.rstrip('/')}/api/chat"
        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'stream': False,
            'options': {
                'num_predict': max_tokens,
                'temperature': 0.3,
            }
        }
        headers = {
            'Content-Type': 'application/json',
        }
        try:
            # Local models can be slow to load, so allow a longer timeout.
            result = self._make_request(url, payload, headers, timeout=30)
        except AIProviderError as e:
            if "Connection" in str(e) or "refused" in str(e).lower():
                raise AIProviderError(
                    f"Cannot connect to Ollama at {self.base_url}. "
                    "Make sure Ollama is running (ollama serve)"
                )
            raise
        try:
            message = result.get('message', {})
            return message.get('content', '').strip()
        except (KeyError, AttributeError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")

    def test_connection(self):
        """Test connection to Ollama server.

        Also checks if the specified model is available locally.
        """
        import json
        import urllib.request
        import urllib.error

        # First check if server is running and list installed models.
        try:
            url = f"{self.base_url.rstrip('/')}/api/tags"
            req = urllib.request.Request(url, method='GET')
            with urllib.request.urlopen(req, timeout=5) as resp:
                data = json.loads(resp.read().decode('utf-8'))

            # /api/tags reports full names like "llama3.2:latest".
            full_names = [m.get('name', '') for m in data.get('models', [])]
            base_names = [n.split(':')[0] for n in full_names]
            wanted = self.model
            wanted_base = wanted.split(':')[0]
            # Accept an exact tagged match ("llama3.2:7b"), an implicit
            # ":latest", or a bare name matching any installed tag — the
            # original check rejected correctly-installed tagged models.
            available = (
                wanted in full_names
                or f"{wanted}:latest" in full_names
                or (wanted == wanted_base and wanted_base in base_names)
            )
            if not available:
                shown = ', '.join(base_names[:5])
                if len(base_names) > 5:
                    shown += ', ...'
                return {
                    'success': False,
                    'message': f"Model '{self.model}' not found. Available: {shown}",
                    'model': self.model
                }
        except urllib.error.URLError:
            return {
                'success': False,
                'message': f"Cannot connect to Ollama at {self.base_url}. Make sure Ollama is running.",
                'model': self.model
            }
        except Exception as e:
            return {
                'success': False,
                'message': f"Error checking Ollama: {str(e)}",
                'model': self.model
            }
        # If server is up and the model exists, do the actual generation test.
        return super().test_connection()

View File

@@ -0,0 +1,56 @@
"""OpenAI provider implementation.
OpenAI is the industry standard for AI APIs. gpt-4o-mini provides
excellent quality at a reasonable price point.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OpenAIProvider(AIProvider):
    """OpenAI provider using their Chat Completions API."""

    NAME = "openai"
    DEFAULT_MODEL = "gpt-4o-mini"
    REQUIRES_API_KEY = True
    API_URL = "https://api.openai.com/v1/chat/completions"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using OpenAI's API.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for OpenAI")

        body = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        request_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }
        reply = self._make_request(self.API_URL, body, request_headers)
        try:
            text = reply['choices'][0]['message']['content']
            return text.strip()
        except (KeyError, IndexError) as exc:
            raise AIProviderError(f"Unexpected response format: {exc}")

View File

@@ -0,0 +1,62 @@
"""OpenRouter provider implementation.
OpenRouter is an aggregator that provides access to 100+ AI models
using a single API key. Maximum flexibility for choosing models.
Uses OpenAI-compatible API format.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OpenRouterProvider(AIProvider):
    """OpenRouter provider for multi-model access."""

    NAME = "openrouter"
    DEFAULT_MODEL = "meta-llama/llama-3.3-70b-instruct"
    REQUIRES_API_KEY = True
    API_URL = "https://openrouter.ai/api/v1/chat/completions"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using OpenRouter's API.

        OpenRouter speaks the OpenAI-compatible format, plus two optional
        headers that identify the calling application.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for OpenRouter")

        chat_messages = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': user_message},
        ]
        request_body = {
            'model': self.model,
            'messages': chat_messages,
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        request_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
            'HTTP-Referer': 'https://github.com/MacRimi/ProxMenux',
            'X-Title': 'ProxMenux Monitor',
        }
        reply = self._make_request(self.API_URL, request_body, request_headers)
        try:
            return reply['choices'][0]['message']['content'].strip()
        except (KeyError, IndexError) as exc:
            raise AIProviderError(f"Unexpected response format: {exc}")

View File

@@ -100,6 +100,16 @@ cp "$SCRIPT_DIR/oci_manager.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️
cp "$SCRIPT_DIR/flask_oci_routes.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ flask_oci_routes.py not found"
cp "$SCRIPT_DIR/oci/description_templates.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ description_templates.py not found"
# Copy AI providers module for notification enhancement
echo "📋 Copying AI providers module..."
if [ -d "$SCRIPT_DIR/ai_providers" ]; then
    mkdir -p "$APP_DIR/usr/bin/ai_providers"
    cp "$SCRIPT_DIR/ai_providers/"*.py "$APP_DIR/usr/bin/ai_providers/"
    echo "✅ AI providers module copied"
else
    # Non-fatal: without this module, notifications fall back to templates.
    echo "⚠️ ai_providers directory not found"
fi
echo "📋 Adding translation support..."
cat > "$APP_DIR/usr/bin/translate_cli.py" << 'PYEOF'
#!/usr/bin/env python3

View File

@@ -101,6 +101,83 @@ def test_notification():
return jsonify({'error': str(e)}), 500
@notification_bp.route('/api/notifications/test-ai', methods=['POST'])
def test_ai_connection():
    """Test AI provider connection and configuration.

    Request body:
        {
            "provider": "groq" | "openai" | "anthropic" | "gemini" | "ollama" | "openrouter",
            "api_key": "...",
            "model": "..." (optional),
            "ollama_url": "http://localhost:11434" (optional, for ollama)
        }

    Returns:
        {
            "success": true/false,
            "message": "Connection successful" or error message,
            "model": "model used for test"
        }
    """
    try:
        data = request.get_json() or {}
        provider = data.get('provider', 'groq')
        api_key = data.get('api_key', '')
        model = data.get('model', '')
        ollama_url = data.get('ollama_url', 'http://localhost:11434')

        # Validate required fields
        # Ollama is the only provider that runs locally and needs no API key.
        if provider != 'ollama' and not api_key:
            return jsonify({
                'success': False,
                'message': 'API key is required',
                'model': ''
            }), 400
        if provider == 'ollama' and not ollama_url:
            return jsonify({
                'success': False,
                'message': 'Ollama URL is required',
                'model': ''
            }), 400

        # Import and use the AI providers module
        # ai_providers ships alongside this script, so make this script's own
        # directory importable before the deferred import below.
        import sys
        import os
        script_dir = os.path.dirname(os.path.abspath(__file__))
        if script_dir not in sys.path:
            sys.path.insert(0, script_dir)

        from ai_providers import get_provider, AIProviderError

        try:
            ai_provider = get_provider(
                provider,
                api_key=api_key,
                model=model,
                base_url=ollama_url
            )
            # test_connection() returns {'success', 'message', 'model'};
            # a failed probe is reported with HTTP 200 and success=False.
            result = ai_provider.test_connection()
            return jsonify(result)
        except AIProviderError as e:
            return jsonify({
                'success': False,
                'message': str(e),
                'model': model
            }), 400
    except Exception as e:
        return jsonify({
            'success': False,
            'message': f'Unexpected error: {str(e)}',
            'model': ''
        }), 500
@notification_bp.route('/api/notifications/status', methods=['GET'])
def get_notification_status():
"""Get notification service status."""

View File

@@ -90,6 +90,60 @@ def _hostname() -> str:
return 'proxmox'
def capture_journal_context(keywords: list, lines: int = 30,
                            since: str = "5 minutes ago") -> str:
    """Capture relevant journal lines for AI context enrichment.

    Searches recent journald entries for lines matching any of the
    provided keywords and returns them for AI analysis.

    Args:
        keywords: List of terms to filter (e.g., ['sdh', 'ata8', 'I/O error'])
        lines: Maximum number of lines to return (default: 30)
        since: Time window for journalctl (default: "5 minutes ago")

    Returns:
        Filtered journal output as string, or empty string if none found

    Example:
        context = capture_journal_context(
            keywords=['sdh', 'ata8', 'exception'],
            lines=30
        )
    """
    if not keywords:
        return ""
    terms = [k for k in keywords if k]
    if not terms:
        return ""
    try:
        # Match keywords case-insensitively; re.escape keeps regex
        # metacharacters in keywords (e.g. 'I/O error') literal.
        pattern = re.compile("|".join(re.escape(k) for k in terms),
                             re.IGNORECASE)
        # Run journalctl with an argv list instead of a shell pipeline:
        # keywords can carry event-derived text (entity ids, device names),
        # and since Python 3.7 re.escape() no longer escapes quotes, so
        # interpolating them into a shell=True command string was a shell
        # injection risk. Filtering and tailing are done in Python instead
        # of grep/tail.
        result = subprocess.run(
            ["journalctl", f"--since={since}", "--no-pager", "-n", "500"],
            capture_output=True,
            text=True,
            timeout=5
        )
        if result.returncode != 0:
            return ""
        matched = [ln for ln in result.stdout.splitlines()
                   if pattern.search(ln)]
        if not matched:
            return ""
        # Keep only the most recent `lines` matches (same as `tail -n`).
        return "\n".join(matched[-lines:]).strip()
    except subprocess.TimeoutExpired:
        return ""
    except Exception:
        # Silently fail - journal context is optional
        return ""
# ─── Journal Watcher (Real-time) ─────────────────────────────────
class JournalWatcher:
@@ -725,11 +779,18 @@ class JournalWatcher:
enriched = '\n'.join(parts)
dev_display = f'/dev/{resolved}'
# Capture journal context for AI enrichment
journal_ctx = capture_journal_context(
keywords=[resolved, ata_port, 'I/O error', 'exception', 'SMART'],
lines=30
)
self._emit('disk_io_error', 'CRITICAL', {
'device': dev_display,
'reason': enriched,
'hostname': self._hostname,
'smart_status': 'FAILED',
'_journal_context': journal_ctx,
}, entity='disk', entity_id=resolved)
return
@@ -2239,6 +2300,21 @@ class ProxmoxHookWatcher:
if dur_m:
data['duration'] = dur_m.group(1).strip()
# Capture journal context for critical/warning events (helps AI provide better context)
if severity in ('CRITICAL', 'WARNING') and event_type not in ('backup_complete', 'update_available'):
# Build keywords from available data for journal search
keywords = ['error', 'fail', 'warning']
if 'smartd' in message.lower() or 'smart' in title.lower():
keywords.extend(['smartd', 'SMART', 'ata'])
if pve_type == 'system-mail':
keywords.append('smartd')
if entity_id:
keywords.append(entity_id)
journal_ctx = capture_journal_context(keywords=keywords, lines=20)
if journal_ctx:
data['_journal_context'] = journal_ctx
event = NotificationEvent(
event_type=event_type,
severity=severity,

View File

@@ -615,17 +615,6 @@ class NotificationManager:
# Render message from template (structured output)
rendered = render_template(event.event_type, event.data)
# Optional AI enhancement (on text body only)
ai_config = {
'enabled': self._config.get('ai_enabled', 'false'),
'provider': self._config.get('ai_provider', ''),
'api_key': self._config.get('ai_api_key', ''),
'model': self._config.get('ai_model', ''),
}
body = format_with_ai(
rendered['title'], rendered['body'], severity, ai_config
)
# Enrich data with structured fields for channels that support them
enriched_data = dict(event.data)
enriched_data['_rendered_fields'] = rendered.get('fields', [])
@@ -633,9 +622,13 @@ class NotificationManager:
enriched_data['_event_type'] = event.event_type
enriched_data['_group'] = TEMPLATES.get(event.event_type, {}).get('group', 'other')
# Send through all active channels
# Pass journal context if available (for AI enrichment)
if '_journal_context' in event.data:
enriched_data['_journal_context'] = event.data['_journal_context']
# Send through all active channels (AI applied per-channel with detail_level)
self._dispatch_to_channels(
rendered['title'], body, severity,
rendered['title'], rendered['body'], severity,
event.event_type, enriched_data, event.source
)
@@ -647,6 +640,9 @@ class NotificationManager:
- {channel}.events.{group} = "true"/"false" (category toggle, default "true")
- {channel}.event.{type} = "true"/"false" (per-event toggle, default from template)
No global fallback -- each channel decides independently what it receives.
AI enhancement is applied per-channel with configurable detail level:
- {channel}.ai_detail_level = "brief" | "standard" | "detailed"
"""
with self._lock:
channels = dict(self._channels)
@@ -655,6 +651,19 @@ class NotificationManager:
event_group = template.get('group', 'other')
default_event_enabled = 'true' if template.get('default_enabled', True) else 'false'
# Build AI config once (shared across channels, detail_level varies)
ai_config = {
'ai_enabled': self._config.get('ai_enabled', 'false'),
'ai_provider': self._config.get('ai_provider', 'groq'),
'ai_api_key': self._config.get('ai_api_key', ''),
'ai_model': self._config.get('ai_model', ''),
'ai_language': self._config.get('ai_language', 'en'),
'ai_ollama_url': self._config.get('ai_ollama_url', ''),
}
# Get journal context if available
journal_context = data.get('_journal_context', '')
for ch_name, channel in channels.items():
# ── Per-channel category check ──
# Default: category enabled (true) unless explicitly disabled.
@@ -669,12 +678,33 @@ class NotificationManager:
continue # Channel has this specific event disabled
try:
# Per-channel emoji enrichment (opt-in via {channel}.rich_format)
ch_title, ch_body = title, body
# ── Per-channel settings ──
detail_level_key = f'{ch_name}.ai_detail_level'
detail_level = self._config.get(detail_level_key, 'standard')
rich_key = f'{ch_name}.rich_format'
if self._config.get(rich_key, 'false') == 'true':
use_rich_format = self._config.get(rich_key, 'false') == 'true'
# ── Per-channel AI enhancement ──
# Apply AI with channel-specific detail level and emoji setting
# If AI is enabled AND rich_format is on, AI will include emojis directly
ch_body = format_with_ai(
ch_title, ch_body, severity, ai_config,
detail_level=detail_level,
journal_context=journal_context,
use_emojis=use_rich_format
)
# Fallback emoji enrichment only if AI is disabled but rich_format is on
# (If AI processed the message with emojis, this is skipped)
ai_enabled_str = ai_config.get('ai_enabled', 'false')
ai_enabled = ai_enabled_str == 'true' if isinstance(ai_enabled_str, str) else bool(ai_enabled_str)
if use_rich_format and not ai_enabled:
ch_title, ch_body = enrich_with_emojis(
event_type, title, body, data
event_type, ch_title, ch_body, data
)
result = channel.send(ch_title, ch_body, severity, data)
@@ -946,14 +976,15 @@ class NotificationManager:
message = rendered['body']
severity = severity or rendered['severity']
# AI enhancement
# AI config for enhancement
ai_config = {
'enabled': self._config.get('ai_enabled', 'false'),
'provider': self._config.get('ai_provider', ''),
'api_key': self._config.get('ai_api_key', ''),
'model': self._config.get('ai_model', ''),
'ai_enabled': self._config.get('ai_enabled', 'false'),
'ai_provider': self._config.get('ai_provider', 'groq'),
'ai_api_key': self._config.get('ai_api_key', ''),
'ai_model': self._config.get('ai_model', ''),
'ai_language': self._config.get('ai_language', 'en'),
'ai_ollama_url': self._config.get('ai_ollama_url', ''),
}
message = format_with_ai(title, message, severity, ai_config)
results = {}
channels_sent = []
@@ -964,11 +995,24 @@ class NotificationManager:
for ch_name, channel in channels.items():
try:
result = channel.send(title, message, severity, data)
# Apply AI enhancement per channel with its detail level and emoji setting
detail_level_key = f'{ch_name}.ai_detail_level'
detail_level = self._config.get(detail_level_key, 'standard')
rich_key = f'{ch_name}.rich_format'
use_rich_format = self._config.get(rich_key, 'false') == 'true'
ch_message = format_with_ai(
title, message, severity, ai_config,
detail_level=detail_level,
use_emojis=use_rich_format
)
result = channel.send(title, ch_message, severity, data)
results[ch_name] = result
self._record_history(
event_type, ch_name, title, message, severity,
event_type, ch_name, title, ch_message, severity,
result.get('success', False),
result.get('error', ''),
source

View File

@@ -1215,107 +1215,252 @@ def enrich_with_emojis(event_type: str, title: str, body: str,
# ─── AI Enhancement (Optional) ───────────────────────────────────
# Supported languages for AI translation
AI_LANGUAGES = {
'en': 'English',
'es': 'Spanish',
'fr': 'French',
'de': 'German',
'pt': 'Portuguese',
'it': 'Italian',
'ru': 'Russian',
'sv': 'Swedish',
'no': 'Norwegian',
'ja': 'Japanese',
'zh': 'Chinese',
'nl': 'Dutch',
}
# Token limits for different detail levels
AI_DETAIL_TOKENS = {
'brief': 100, # 2-3 lines, essential only
'standard': 200, # Concise paragraph with context
'detailed': 400, # Complete technical details
}
# System prompt template - informative, no recommendations
AI_SYSTEM_PROMPT = """You are a technical assistant for ProxMenux Monitor, a Proxmox server monitoring system.
Your task is to translate and format system alerts to {language}.
STRICT RULES:
1. Translate the message to the requested language
2. Maintain an INFORMATIVE and OBJECTIVE tone
3. DO NOT use formal introductions ("Dear...", "Esteemed...")
4. DO NOT give recommendations or action suggestions
5. DO NOT interpret data subjectively
6. Present only FACTS and TECHNICAL DATA
7. Respect the requested detail level: {detail_level}
{emoji_instructions}
DETAIL LEVELS:
- brief: 2-3 lines maximum, only essential information
- standard: Concise paragraph with basic context
- detailed: Complete information with all available technical details
MESSAGE TYPES:
- Some messages come from Proxmox VE webhooks with raw system data (backup logs, update lists, SMART errors)
- Parse and present this data clearly, extracting key information (VM IDs, sizes, durations, errors)
- For backup messages: highlight status (OK/ERROR), VM names, sizes, and duration
- For update messages: list package names and counts
- For disk/SMART errors: highlight affected device and error type
If journal log context is provided, use it for more precise event information."""
# Emoji instructions for rich format channels
AI_EMOJI_INSTRUCTIONS = """
8. ENRICH with contextual emojis and icons:
- Use appropriate emojis at the START of the title/message to indicate severity and type
- Severity indicators: Use a colored circle at the start (info=blue, warning=yellow, critical=red)
- Add relevant technical emojis: disk, server, network, security, backup, etc.
- Keep emojis contextual and professional, not decorative
- Examples of appropriate emojis:
* Disk/Storage: disk, folder, file
* Network: globe, signal, connection
* Security: shield, lock, key, warning
* System: gear, server, computer
* Status: checkmark, cross, warning, info
* Backup: save, sync, cloud
* Performance: chart, speedometer"""
# No emoji instructions for email/plain channels
AI_NO_EMOJI_INSTRUCTIONS = """
8. DO NOT use emojis or special icons - plain text only for email compatibility"""
class AIEnhancer:
"""Optional AI message enhancement using external LLM API.
"""AI message enhancement using pluggable providers.
Enriches template-generated messages with context and suggestions.
Falls back to original message if AI is unavailable or fails.
Supports 6 providers: Groq, OpenAI, Anthropic, Gemini, Ollama, OpenRouter.
Translates and formats notifications based on configured language and detail level.
"""
SYSTEM_PROMPT = """You are a Proxmox system administrator assistant.
You receive a notification message about a server event and must enhance it with:
1. A brief explanation of what this means in practical terms
2. A suggested action if applicable (1-2 sentences max)
Keep the response concise (max 3 sentences total). Do not repeat the original message.
Respond in the same language as the input message."""
def __init__(self, config: Dict[str, Any]):
"""Initialize AIEnhancer with configuration.
Args:
config: Dictionary containing:
- ai_provider: Provider name (groq, openai, anthropic, gemini, ollama, openrouter)
- ai_api_key: API key (not required for ollama)
- ai_model: Optional model override
- ai_language: Target language code (en, es, fr, etc.)
- ai_ollama_url: URL for Ollama server (optional)
"""
self.config = config
self._provider = None
self._init_provider()
def __init__(self, provider: str, api_key: str, model: str = ''):
self.provider = provider.lower()
self.api_key = api_key
self.model = model
self._enabled = bool(api_key)
def _init_provider(self):
"""Initialize the AI provider based on configuration."""
try:
# Import here to avoid circular imports
import sys
import os
# Add script directory to path for ai_providers import
script_dir = os.path.dirname(os.path.abspath(__file__))
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
from ai_providers import get_provider
provider_name = self.config.get('ai_provider', 'groq')
self._provider = get_provider(
provider_name,
api_key=self.config.get('ai_api_key', ''),
model=self.config.get('ai_model', ''),
base_url=self.config.get('ai_ollama_url', ''),
)
except Exception as e:
print(f"[AIEnhancer] Failed to initialize provider: {e}")
self._provider = None
@property
def enabled(self) -> bool:
return self._enabled
"""Check if AI enhancement is available."""
return self._provider is not None
def enhance(self, title: str, body: str, severity: str) -> Optional[str]:
"""Enhance a notification message with AI context.
def enhance(self, title: str, body: str, severity: str,
detail_level: str = 'standard',
journal_context: str = '',
use_emojis: bool = False) -> Optional[str]:
"""Enhance/translate notification with AI.
Returns enhanced body text, or None if enhancement fails/disabled.
Args:
title: Notification title
body: Notification body text
severity: Severity level (info, warning, critical)
detail_level: Level of detail (brief, standard, detailed)
journal_context: Optional journal log lines for context
use_emojis: Whether to include emojis in the response (for push channels)
Returns:
Enhanced/translated text or None if failed
"""
if not self._enabled:
if not self._provider:
return None
# Get language settings
language_code = self.config.get('ai_language', 'en')
language_name = AI_LANGUAGES.get(language_code, 'English')
# Get token limit for detail level
max_tokens = AI_DETAIL_TOKENS.get(detail_level, 200)
# Select emoji instructions based on channel type
emoji_instructions = AI_EMOJI_INSTRUCTIONS if use_emojis else AI_NO_EMOJI_INSTRUCTIONS
# Build system prompt with emoji instructions
system_prompt = AI_SYSTEM_PROMPT.format(
language=language_name,
detail_level=detail_level,
emoji_instructions=emoji_instructions
)
# Build user message
user_msg = f"Severity: {severity}\nTitle: {title}\nMessage:\n{body}"
if journal_context:
user_msg += f"\n\nJournal log context:\n{journal_context}"
try:
if self.provider in ('openai', 'groq'):
return self._call_openai_compatible(title, body, severity)
result = self._provider.generate(system_prompt, user_msg, max_tokens)
return result
except Exception as e:
print(f"[AIEnhancer] Enhancement failed: {e}")
return None
return None
def _call_openai_compatible(self, title: str, body: str, severity: str) -> Optional[str]:
"""Call OpenAI-compatible API (works with OpenAI, Groq, local)."""
if self.provider == 'groq':
url = 'https://api.groq.com/openai/v1/chat/completions'
model = self.model or 'llama-3.3-70b-versatile'
else: # openai
url = 'https://api.openai.com/v1/chat/completions'
model = self.model or 'gpt-4o-mini'
def test_connection(self) -> Dict[str, Any]:
"""Test the AI provider connection.
user_msg = f"Severity: {severity}\nTitle: {title}\nMessage: {body}"
payload = json.dumps({
'model': model,
'messages': [
{'role': 'system', 'content': self.SYSTEM_PROMPT},
{'role': 'user', 'content': user_msg},
],
'max_tokens': 150,
'temperature': 0.3,
}).encode('utf-8')
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}',
}
req = urllib.request.Request(url, data=payload, headers=headers)
with urllib.request.urlopen(req, timeout=10) as resp:
result = json.loads(resp.read().decode('utf-8'))
content = result['choices'][0]['message']['content'].strip()
return content if content else None
Returns:
Dict with success, message, and model info
"""
if not self._provider:
return {
'success': False,
'message': 'Provider not initialized',
'model': ''
}
return self._provider.test_connection()
def format_with_ai(title: str, body: str, severity: str,
ai_config: Dict[str, str]) -> str:
"""Format a message with optional AI enhancement.
ai_config: Dict[str, Any],
detail_level: str = 'standard',
journal_context: str = '',
use_emojis: bool = False) -> str:
"""Format a message with AI enhancement/translation.
If AI is configured and succeeds, appends AI insight to the body.
Otherwise returns the original body unchanged.
Replaces the message body with AI-processed version if successful.
Falls back to original body if AI is unavailable or fails.
Args:
title: Notification title
body: Notification body
severity: Severity level
ai_config: {'enabled': 'true', 'provider': 'groq', 'api_key': '...', 'model': ''}
ai_config: Configuration dictionary with AI settings
detail_level: Level of detail (brief, standard, detailed)
journal_context: Optional journal log context
use_emojis: Whether to include emojis (for push channels like Telegram/Discord)
Returns:
Enhanced body string
Enhanced body string or original if AI fails
"""
if ai_config.get('enabled') != 'true' or not ai_config.get('api_key'):
# Check if AI is enabled
ai_enabled = ai_config.get('ai_enabled')
if isinstance(ai_enabled, str):
ai_enabled = ai_enabled.lower() == 'true'
if not ai_enabled:
return body
enhancer = AIEnhancer(
provider=ai_config.get('provider', 'groq'),
api_key=ai_config['api_key'],
model=ai_config.get('model', ''),
# Check for API key (not required for Ollama)
provider = ai_config.get('ai_provider', 'groq')
if provider != 'ollama' and not ai_config.get('ai_api_key'):
return body
# For Ollama, check URL is configured
if provider == 'ollama' and not ai_config.get('ai_ollama_url'):
return body
# Create enhancer and process
enhancer = AIEnhancer(ai_config)
enhanced = enhancer.enhance(
title, body, severity,
detail_level=detail_level,
journal_context=journal_context,
use_emojis=use_emojis
)
insight = enhancer.enhance(title, body, severity)
if insight:
return f"{body}\n\n---\n{insight}"
# Return enhanced text if successful, otherwise original
if enhanced:
# For detailed level (email), append original message for reference
# This ensures full technical data is available even after AI processing
if detail_level == 'detailed' and body and len(body) > 50:
# Only append if original has substantial content
enhanced += "\n\n" + "-" * 40 + "\n"
enhanced += "Original message:\n"
enhanced += body
return enhanced
return body