Merge pull request #169 from MacRimi/develop

ProxMenux v1.2.0
This commit is contained in:
MacRimi
2026-04-17 23:51:51 +02:00
committed by GitHub
197 changed files with 73004 additions and 29702 deletions
BIN
View File
Binary file not shown.
+1 -1
View File
@@ -1 +1 @@
19c54fc98fe5492dded3367e70138d019efba6d6339f3f92f1333ccb2ab78ffb ProxMenux-1.0.2.AppImage
7d3df15c65411b4429e72305e9c7460d09b67c6d5bd2a50c05dd88a928fc4f94 ProxMenux-1.2.0.AppImage
+17
View File
@@ -730,6 +730,23 @@ entities:
![Home Assistant Integration Example](AppImage/public/images/docs/homeassistant-integration.png)
---
## License
This project is licensed under the **Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)**.
You are free to:
- Share — copy and redistribute the material in any medium or format
- Adapt — remix, transform, and build upon the material
Under the following terms:
- Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made
- NonCommercial — You may not use the material for commercial purposes
For more details, see the [full license](https://creativecommons.org/licenses/by-nc/4.0/).
---
+12
View File
@@ -163,3 +163,15 @@
.xterm-rows {
margin: 0 !important;
}
/* ===================== */
/* Progress Animations */
/* ===================== */
@keyframes indeterminate {
0% {
transform: translateX(-100%);
}
100% {
transform: translateX(400%);
}
}
+8 -2
View File
@@ -1,5 +1,5 @@
import type React from "react"
import type { Metadata } from "next"
import type { Metadata, Viewport } from "next"
import { GeistSans } from "geist/font/sans"
import { GeistMono } from "geist/font/mono"
import { ThemeProvider } from "../components/theme-provider"
@@ -20,7 +20,13 @@ export const metadata: Metadata = {
shortcut: "/favicon.ico",
apple: [{ url: "/apple-touch-icon.png", sizes: "180x180", type: "image/png" }],
},
viewport: "width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no",
}
export const viewport: Viewport = {
width: "device-width",
initialScale: 1,
maximumScale: 1,
userScalable: false,
themeColor: [
{ media: "(prefers-color-scheme: light)", color: "#ffffff" },
{ media: "(prefers-color-scheme: dark)", color: "#2b2f36" },
+20 -7
View File
@@ -29,10 +29,19 @@ export default function Home() {
const response = await fetch(getApiUrl("/api/auth/status"), {
headers: token ? { Authorization: `Bearer ${token}` } : {},
})
// Check if response is valid JSON before parsing
if (!response.ok) {
throw new Error(`HTTP ${response.status}`)
}
const contentType = response.headers.get("content-type")
if (!contentType || !contentType.includes("application/json")) {
throw new Error("Response is not JSON")
}
const data = await response.json()
console.log("[v0] Auth status:", data)
const authenticated = data.auth_enabled ? data.authenticated : true
setAuthStatus({
@@ -41,8 +50,8 @@ export default function Home() {
authConfigured: data.auth_configured,
authenticated,
})
} catch (error) {
console.error("[v0] Failed to check auth status:", error)
} catch {
// API not available - assume no auth configured (silent fail, no console error)
setAuthStatus({
loading: false,
authEnabled: false,
@@ -63,9 +72,13 @@ export default function Home() {
if (authStatus.loading) {
return (
<div className="min-h-screen bg-background flex items-center justify-center">
<div className="text-center space-y-4">
<div className="animate-spin rounded-full h-12 w-12 border-b-2 border-primary mx-auto"></div>
<p className="text-muted-foreground">Loading...</p>
<div className="flex flex-col items-center gap-4">
<div className="relative">
<div className="h-12 w-12 rounded-full border-2 border-muted"></div>
<div className="absolute inset-0 h-12 w-12 rounded-full border-2 border-transparent border-t-primary animate-spin"></div>
</div>
<div className="text-sm font-medium text-foreground">Loading...</div>
<p className="text-xs text-muted-foreground">Connecting to ProxMenux Monitor</p>
</div>
</div>
)
+14 -6
View File
@@ -27,18 +27,26 @@ export function AuthSetup({ onComplete }: AuthSetupProps) {
const checkOnboardingStatus = async () => {
try {
const response = await fetch(getApiUrl("/api/auth/status"))
// Check if response is valid JSON before parsing
if (!response.ok) {
// API not available - don't show modal in preview
return
}
const contentType = response.headers.get("content-type")
if (!contentType || !contentType.includes("application/json")) {
return
}
const data = await response.json()
console.log("[v0] Auth status for modal check:", data)
// Show modal if auth is not configured and not declined
if (!data.auth_configured) {
setTimeout(() => setOpen(true), 500)
}
} catch (error) {
console.error("[v0] Failed to check auth status:", error)
// Fail-safe: show modal if we can't check status
setTimeout(() => setOpen(true), 500)
} catch {
// API not available (preview environment) - don't show modal
}
}
@@ -0,0 +1,255 @@
"use client"
import { cn } from "@/lib/utils"
// Props for the GPU passthrough-mode switch indicator.
interface GpuSwitchModeIndicatorProps {
  // Currently active mode as detected on the host; "unknown" renders in gray.
  mode: "lxc" | "vm" | "unknown"
  // True while the surrounding card is in edit mode; enables click-to-toggle.
  isEditing?: boolean
  // Mode the user has selected but not yet applied; overrides `mode` for display.
  pendingMode?: "lxc" | "vm" | null
  // Called on click, but only while `isEditing` is true (see handleClick).
  onToggle?: (e: React.MouseEvent) => void
  // Extra class names merged onto the root element via cn().
  className?: string
}
// Visual indicator showing which consumer a GPU is wired to: an SVG diagram of
// the GPU chip feeding a switch node that branches to either an LXC container
// (blue, top) or a VM (purple, bottom), plus a textual status column.
// While editing, clicks toggle the pending mode instead of propagating to the
// parent card; a pending-but-unapplied change shows a pulsing notice.
export function GpuSwitchModeIndicator({
  mode,
  isEditing = false,
  pendingMode = null,
  onToggle,
  className,
}: GpuSwitchModeIndicatorProps) {
  // Pending selection (if any) wins over the detected mode for display.
  const displayMode = pendingMode ?? mode
  const isLxcActive = displayMode === "lxc"
  const isVmActive = displayMode === "vm"
  const hasChanged = pendingMode !== null && pendingMode !== mode

  // Colors
  // blue-500 for LXC, purple-500 for VM, gray-500 when mode is unknown.
  const activeColor = isLxcActive ? "#3b82f6" : isVmActive ? "#a855f7" : "#6b7280"
  const inactiveColor = "#374151" // gray-700 for dark theme
  const lxcColor = isLxcActive ? "#3b82f6" : inactiveColor
  const vmColor = isVmActive ? "#a855f7" : inactiveColor

  const handleClick = (e: React.MouseEvent) => {
    // Only stop propagation and handle toggle when in editing mode
    if (isEditing) {
      e.stopPropagation()
      if (onToggle) {
        onToggle(e)
      }
    }
    // When not editing, let the click propagate to the card to open the modal
  }

  return (
    <div
      className={cn(
        "flex items-center gap-6",
        isEditing && "cursor-pointer",
        className
      )}
      onClick={handleClick}
    >
      {/* Large SVG Diagram */}
      <svg
        viewBox="0 0 220 100"
        className="h-24 w-56 flex-shrink-0"
        xmlns="http://www.w3.org/2000/svg"
      >
        {/* GPU Chip - Large with "GPU" text */}
        <g transform="translate(0, 22)">
          {/* Main chip body */}
          <rect
            x="4"
            y="8"
            width="44"
            height="36"
            rx="6"
            fill={`${activeColor}20`}
            stroke={activeColor}
            strokeWidth="2.5"
            className="transition-all duration-300"
          />
          {/* Chip pins - top */}
          <line x1="14" y1="2" x2="14" y2="8" stroke={activeColor} strokeWidth="2.5" strokeLinecap="round" className="transition-all duration-300" />
          <line x1="26" y1="2" x2="26" y2="8" stroke={activeColor} strokeWidth="2.5" strokeLinecap="round" className="transition-all duration-300" />
          <line x1="38" y1="2" x2="38" y2="8" stroke={activeColor} strokeWidth="2.5" strokeLinecap="round" className="transition-all duration-300" />
          {/* Chip pins - bottom */}
          <line x1="14" y1="44" x2="14" y2="50" stroke={activeColor} strokeWidth="2.5" strokeLinecap="round" className="transition-all duration-300" />
          <line x1="26" y1="44" x2="26" y2="50" stroke={activeColor} strokeWidth="2.5" strokeLinecap="round" className="transition-all duration-300" />
          <line x1="38" y1="44" x2="38" y2="50" stroke={activeColor} strokeWidth="2.5" strokeLinecap="round" className="transition-all duration-300" />
          {/* GPU text */}
          <text
            x="26"
            y="32"
            textAnchor="middle"
            fill={activeColor}
            className="text-[14px] font-bold transition-all duration-300"
            style={{ fontFamily: 'system-ui, sans-serif' }}
          >
            GPU
          </text>
        </g>
        {/* Connection line from GPU to switch */}
        <line
          x1="52"
          y1="50"
          x2="78"
          y2="50"
          stroke={activeColor}
          strokeWidth="3"
          strokeLinecap="round"
          className="transition-all duration-300"
        />
        {/* Central Switch Node - Large circle with inner dot; amber while editing */}
        <circle
          cx="95"
          cy="50"
          r="14"
          fill={isEditing ? "#f59e0b20" : `${activeColor}20`}
          stroke={isEditing ? "#f59e0b" : activeColor}
          strokeWidth="3"
          className="transition-all duration-300"
        />
        <circle
          cx="95"
          cy="50"
          r="6"
          fill={isEditing ? "#f59e0b" : activeColor}
          className="transition-all duration-300"
        />
        {/* LXC Branch Line - going up-right (thicker when active) */}
        <path
          d="M 109 42 L 135 20"
          fill="none"
          stroke={lxcColor}
          strokeWidth={isLxcActive ? "3.5" : "2"}
          strokeLinecap="round"
          className="transition-all duration-300"
        />
        {/* VM Branch Line - going down-right (thicker when active) */}
        <path
          d="M 109 58 L 135 80"
          fill="none"
          stroke={vmColor}
          strokeWidth={isVmActive ? "3.5" : "2"}
          strokeLinecap="round"
          className="transition-all duration-300"
        />
        {/* LXC Container Icon - Server/Stack icon */}
        <g transform="translate(138, 2)">
          {/* Container box */}
          <rect
            x="0"
            y="0"
            width="32"
            height="28"
            rx="4"
            fill={isLxcActive ? `${lxcColor}25` : "transparent"}
            stroke={lxcColor}
            strokeWidth={isLxcActive ? "2.5" : "1.5"}
            className="transition-all duration-300"
          />
          {/* Container layers/lines */}
          <line x1="0" y1="10" x2="32" y2="10" stroke={lxcColor} strokeWidth={isLxcActive ? "1.5" : "1"} className="transition-all duration-300" />
          <line x1="0" y1="19" x2="32" y2="19" stroke={lxcColor} strokeWidth={isLxcActive ? "1.5" : "1"} className="transition-all duration-300" />
          {/* Status dots */}
          <circle cx="7" cy="5" r="2" fill={lxcColor} className="transition-all duration-300" />
          <circle cx="7" cy="14.5" r="2" fill={lxcColor} className="transition-all duration-300" />
          <circle cx="7" cy="23.5" r="2" fill={lxcColor} className="transition-all duration-300" />
        </g>
        {/* LXC Label */}
        <text
          x="188"
          y="22"
          textAnchor="start"
          fill={lxcColor}
          className={cn(
            "transition-all duration-300",
            isLxcActive ? "text-[14px] font-bold" : "text-[12px] font-medium"
          )}
          style={{ fontFamily: 'system-ui, sans-serif' }}
        >
          LXC
        </text>
        {/* VM Monitor Icon */}
        <g transform="translate(138, 65)">
          {/* Monitor screen */}
          <rect
            x="2"
            y="0"
            width="28"
            height="18"
            rx="3"
            fill={isVmActive ? `${vmColor}25` : "transparent"}
            stroke={vmColor}
            strokeWidth={isVmActive ? "2.5" : "1.5"}
            className="transition-all duration-300"
          />
          {/* Screen inner/shine */}
          <rect
            x="5"
            y="3"
            width="22"
            height="12"
            rx="1"
            fill={isVmActive ? `${vmColor}30` : `${vmColor}10`}
            className="transition-all duration-300"
          />
          {/* Monitor stand */}
          <line x1="16" y1="18" x2="16" y2="24" stroke={vmColor} strokeWidth={isVmActive ? "2.5" : "1.5"} strokeLinecap="round" className="transition-all duration-300" />
          {/* Monitor base */}
          <line x1="8" y1="24" x2="24" y2="24" stroke={vmColor} strokeWidth={isVmActive ? "2.5" : "1.5"} strokeLinecap="round" className="transition-all duration-300" />
        </g>
        {/* VM Label */}
        <text
          x="188"
          y="84"
          textAnchor="start"
          fill={vmColor}
          className={cn(
            "transition-all duration-300",
            isVmActive ? "text-[14px] font-bold" : "text-[12px] font-medium"
          )}
          style={{ fontFamily: 'system-ui, sans-serif' }}
        >
          VM
        </text>
      </svg>
      {/* Status Text - Large like GPU name */}
      <div className="flex flex-col gap-1 min-w-0 flex-1">
        <span
          className={cn(
            "text-base font-semibold transition-all duration-300",
            isLxcActive ? "text-blue-500" : isVmActive ? "text-purple-500" : "text-muted-foreground"
          )}
        >
          {isLxcActive
            ? "Ready for LXC containers"
            : isVmActive
              ? "Ready for VM passthrough"
              : "Mode unknown"}
        </span>
        <span className="text-sm text-muted-foreground">
          {isLxcActive
            ? "Native driver active"
            : isVmActive
              ? "VFIO-PCI driver active"
              : "No driver detected"}
        </span>
        {hasChanged && (
          <span className="text-sm text-amber-500 font-medium animate-pulse">
            Change pending...
          </span>
        )}
      </div>
    </div>
  )
}
File diff suppressed because it is too large Load Diff
+620 -144
View File
@@ -2,7 +2,8 @@
import type React from "react"
import { useState, useEffect } from "react"
import { useState, useEffect, useCallback } from "react"
import { getAuthToken } from "@/lib/api-config"
import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
import { Badge } from "@/components/ui/badge"
import { Button } from "@/components/ui/button"
@@ -11,6 +12,7 @@ import {
CheckCircle2,
AlertTriangle,
XCircle,
Info,
Activity,
Cpu,
MemoryStick,
@@ -23,16 +25,42 @@ import {
RefreshCw,
Shield,
X,
Clock,
BellOff,
ChevronRight,
Settings2,
HelpCircle,
} from "lucide-react"
// One health-category result as returned by the health API.
// NOTE(review): shape mirrors the backend payload — the index signature keeps
// it open for extra backend fields; confirm against the API before tightening.
interface CategoryCheck {
  // Severity string such as "OK" / "INFO" / "WARNING" / "CRITICAL" (compared
  // case-insensitively elsewhere in this file).
  status: string
  reason?: string
  details?: any
  // Per-check breakdown keyed by check name (e.g. "cpu_usage").
  checks?: Record<string, { status: string; detail: string; [key: string]: any }>
  // Whether the user may dismiss this finding from the UI.
  dismissable?: boolean
  [key: string]: any
}
// A health error the user has dismissed; returned in the /api/health/full
// payload and used to downgrade otherwise-OK categories to INFO.
interface DismissedError {
  // Unique key identifying the dismissed error (sent back to acknowledge).
  error_key: string
  // Backend category name (e.g. "pve_services") — mapped to UI keys via CATEGORIES.
  category: string
  severity: string
  reason: string
  dismissed: boolean
  permanent?: boolean
  // Hours remaining before the suppression expires.
  suppression_remaining_hours: number
  suppression_hours?: number
  resolved_at: string
}
// A user-configured suppression rule; dismissals covered by one of these are
// excluded when computing which categories to flag as INFO.
interface CustomSuppression {
  key: string
  label: string
  category: string
  // Icon identifier — presumably a lucide icon name; confirm with consumer.
  icon: string
  // Suppression duration in hours.
  hours: number
}
interface HealthDetails {
overall: string
summary: string
@@ -51,6 +79,14 @@ interface HealthDetails {
timestamp: string
}
// Combined payload of the /api/health/full endpoint: health details plus the
// active/dismissed error lists and custom suppressions in one round-trip.
interface FullHealthData {
  health: HealthDetails
  active_errors: any[]
  dismissed: DismissedError[]
  custom_suppressions: CustomSuppression[]
  timestamp: string
}
interface HealthStatusModalProps {
open: boolean
onOpenChange: (open: boolean) => void
@@ -58,65 +94,174 @@ interface HealthStatusModalProps {
}
const CATEGORIES = [
{ key: "cpu", label: "CPU Usage & Temperature", Icon: Cpu },
{ key: "memory", label: "Memory & Swap", Icon: MemoryStick },
{ key: "storage", label: "Storage Mounts & Space", Icon: HardDrive },
{ key: "disks", label: "Disk I/O & Errors", Icon: Disc },
{ key: "network", label: "Network Interfaces", Icon: Network },
{ key: "vms", label: "VMs & Containers", Icon: Box },
{ key: "services", label: "PVE Services", Icon: Settings },
{ key: "logs", label: "System Logs", Icon: FileText },
{ key: "updates", label: "System Updates", Icon: RefreshCw },
{ key: "security", label: "Security & Certificates", Icon: Shield },
{ key: "cpu", category: "temperature", label: "CPU Usage & Temperature", Icon: Cpu },
{ key: "memory", category: "memory", label: "Memory & Swap", Icon: MemoryStick },
{ key: "storage", category: "storage", label: "Storage Mounts & Space", Icon: HardDrive },
{ key: "disks", category: "disks", label: "Disk I/O & Errors", Icon: Disc },
{ key: "network", category: "network", label: "Network Interfaces", Icon: Network },
{ key: "vms", category: "vms", label: "VMs & Containers", Icon: Box },
{ key: "services", category: "pve_services", label: "PVE Services", Icon: Settings },
{ key: "logs", category: "logs", label: "System Logs", Icon: FileText },
{ key: "updates", category: "updates", label: "System Updates", Icon: RefreshCw },
{ key: "security", category: "security", label: "Security & Certificates", Icon: Shield },
]
export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatusModalProps) {
const [loading, setLoading] = useState(true)
const [healthData, setHealthData] = useState<HealthDetails | null>(null)
const [dismissedItems, setDismissedItems] = useState<DismissedError[]>([])
const [customSuppressions, setCustomSuppressions] = useState<CustomSuppression[]>([])
const [error, setError] = useState<string | null>(null)
const [dismissingKey, setDismissingKey] = useState<string | null>(null)
const [expandedCategories, setExpandedCategories] = useState<Set<string>>(new Set())
useEffect(() => {
if (open) {
fetchHealthDetails()
}
}, [open])
const fetchHealthDetails = async () => {
const fetchHealthDetails = useCallback(async () => {
setLoading(true)
setError(null)
try {
const response = await fetch(getApiUrl("/api/health/details"))
if (!response.ok) {
throw new Error("Failed to fetch health details")
let newOverallStatus = "OK"
// Use the new combined endpoint for fewer round-trips
const token = getAuthToken()
const authHeaders: Record<string, string> = {}
if (token) {
authHeaders["Authorization"] = `Bearer ${token}`
}
const data = await response.json()
console.log("[v0] Health data received:", data)
setHealthData(data)
const response = await fetch(getApiUrl("/api/health/full"), { headers: authHeaders })
let infoCount = 0
if (!response.ok) {
// Fallback to legacy endpoint
const legacyResponse = await fetch(getApiUrl("/api/health/details"), { headers: authHeaders })
if (!legacyResponse.ok) throw new Error("Failed to fetch health details")
const data = await legacyResponse.json()
setHealthData(data)
setDismissedItems([])
setCustomSuppressions([])
newOverallStatus = data?.overall || "OK"
// Count INFO categories from legacy data
if (data?.details) {
CATEGORIES.forEach(({ key }) => {
const cat = data.details[key as keyof typeof data.details]
if (cat && cat.status?.toUpperCase() === "INFO") {
infoCount++
}
})
}
} else {
const fullData: FullHealthData = await response.json()
setHealthData(fullData.health)
setDismissedItems(fullData.dismissed || [])
setCustomSuppressions(fullData.custom_suppressions || [])
newOverallStatus = fullData.health?.overall || "OK"
// Get categories that have dismissed items (these become INFO)
const customCats = new Set((fullData.custom_suppressions || []).map((cs: { category: string }) => cs.category))
const filteredDismissed = (fullData.dismissed || []).filter((item: { category: string }) => !customCats.has(item.category))
const categoriesWithDismissed = new Set<string>()
filteredDismissed.forEach((item: { category: string }) => {
const catMeta = CATEGORIES.find(c => c.category === item.category || c.key === item.category)
if (catMeta) {
categoriesWithDismissed.add(catMeta.key)
}
})
// Count effective INFO categories (original INFO + OK categories with dismissed)
if (fullData.health?.details) {
CATEGORIES.forEach(({ key }) => {
const cat = fullData.health.details[key as keyof typeof fullData.health.details]
if (cat) {
const originalStatus = cat.status?.toUpperCase()
// Count as INFO if: originally INFO OR (originally OK and has dismissed items)
if (originalStatus === "INFO" || (originalStatus === "OK" && categoriesWithDismissed.has(key))) {
infoCount++
}
}
})
}
}
const totalInfoCount = infoCount
// Emit event with the FRESH data from the response, not the stale state
const event = new CustomEvent("healthStatusUpdated", {
detail: { status: data.overall },
detail: { status: newOverallStatus, infoCount: totalInfoCount },
})
window.dispatchEvent(event)
} catch (err) {
console.error("[v0] Error fetching health data:", err)
setError(err instanceof Error ? err.message : "Unknown error")
} finally {
setLoading(false)
}
}, [getApiUrl])
// Tick counter to force re-render every 30s so "X minutes ago" stays current
const [, setTick] = useState(0)
useEffect(() => {
if (!open) return
const tickInterval = setInterval(() => setTick(t => t + 1), 30000)
return () => clearInterval(tickInterval)
}, [open])
useEffect(() => {
if (open) {
fetchHealthDetails()
// Auto-refresh every 5 minutes while modal is open
const refreshInterval = setInterval(fetchHealthDetails, 300000)
return () => clearInterval(refreshInterval)
}
}, [open, fetchHealthDetails])
// Auto-expand non-OK categories when data loads
useEffect(() => {
if (healthData?.details) {
const nonOkCategories = new Set<string>()
CATEGORIES.forEach(({ key }) => {
const cat = healthData.details[key as keyof typeof healthData.details]
if (cat && cat.status?.toUpperCase() !== "OK") {
// Updates section: only auto-expand on WARNING+, not INFO
if (key === "updates" && cat.status?.toUpperCase() === "INFO") {
return
}
nonOkCategories.add(key)
}
})
setExpandedCategories(nonOkCategories)
}
}, [healthData])
// Flip the expanded/collapsed state of a single health-category row.
const toggleCategory = (key: string) => {
  setExpandedCategories(current => {
    // Copy the set so React sees a new reference.
    const updated = new Set(current)
    updated.has(key) ? updated.delete(key) : updated.add(key)
    return updated
  })
}
const getStatusIcon = (status: string) => {
const getStatusIcon = (status: string, size: "sm" | "md" = "md") => {
const statusUpper = status?.toUpperCase()
const cls = size === "sm" ? "h-4 w-4" : "h-5 w-5"
switch (statusUpper) {
case "OK":
return <CheckCircle2 className="h-5 w-5 text-green-500" />
return <CheckCircle2 className={`${cls} text-green-500`} />
case "INFO":
return <Info className={`${cls} text-blue-500`} />
case "WARNING":
return <AlertTriangle className="h-5 w-5 text-yellow-500" />
return <AlertTriangle className={`${cls} text-yellow-500`} />
case "CRITICAL":
return <XCircle className="h-5 w-5 text-red-500" />
return <XCircle className={`${cls} text-red-500`} />
case "UNKNOWN":
return <HelpCircle className={`${cls} text-amber-400`} />
default:
return <Activity className="h-5 w-5 text-gray-500" />
return <Activity className={`${cls} text-muted-foreground`} />
}
}
@@ -125,45 +270,76 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
switch (statusUpper) {
case "OK":
return <Badge className="bg-green-500 text-white hover:bg-green-500">OK</Badge>
case "INFO":
return <Badge className="bg-blue-500 text-white hover:bg-blue-500">Info</Badge>
case "WARNING":
return <Badge className="bg-yellow-500 text-white hover:bg-yellow-500">Warning</Badge>
case "CRITICAL":
return <Badge className="bg-red-500 text-white hover:bg-red-500">Critical</Badge>
case "UNKNOWN":
return <Badge className="bg-amber-500 text-white hover:bg-amber-500">UNKNOWN</Badge>
default:
return <Badge>Unknown</Badge>
}
}
const getHealthStats = () => {
if (!healthData?.details) {
return { total: 0, healthy: 0, warnings: 0, critical: 0 }
// Categories that currently carry user-dismissed errors (shown as INFO).
// Dismissals already covered by a custom suppression are excluded first;
// backend category names are mapped onto our CATEGORIES keys.
const getCategoriesWithDismissed = () => {
  const suppressedCategories = new Set(customSuppressions.map(cs => cs.category))
  const result = new Set<string>()
  for (const item of dismissedItems) {
    if (suppressedCategories.has(item.category)) continue
    const meta = CATEGORIES.find(c => c.category === item.category || c.key === item.category)
    if (meta) result.add(meta.key)
  }
  return result
}
const categoriesWithDismissed = getCategoriesWithDismissed()
// Effective status for a category: an OK category that still has dismissed
// items is downgraded to INFO so the user can see something was hidden.
const getEffectiveStatus = (key: string, originalStatus: string) => {
  const normalized = originalStatus?.toUpperCase()
  if (normalized === "OK" && categoriesWithDismissed.has(key)) return "INFO"
  return normalized || "UNKNOWN"
}
const getHealthStats = () => {
if (!healthData?.details) return { total: 0, healthy: 0, info: 0, warnings: 0, critical: 0, unknown: 0 }
let healthy = 0
let info = 0
let warnings = 0
let critical = 0
let unknown = 0
CATEGORIES.forEach(({ key }) => {
const categoryData = healthData.details[key as keyof typeof healthData.details]
if (categoryData) {
const status = categoryData.status?.toUpperCase()
if (status === "OK") healthy++
else if (status === "WARNING") warnings++
else if (status === "CRITICAL") critical++
const effectiveStatus = getEffectiveStatus(key, categoryData.status)
if (effectiveStatus === "OK") healthy++
else if (effectiveStatus === "INFO") info++
else if (effectiveStatus === "WARNING") warnings++
else if (effectiveStatus === "CRITICAL") critical++
else if (effectiveStatus === "UNKNOWN") unknown++
}
})
return { total: CATEGORIES.length, healthy, warnings, critical }
return { total: CATEGORIES.length, healthy, info, warnings, critical, unknown }
}
const stats = getHealthStats()
const handleCategoryClick = (categoryKey: string, status: string) => {
if (status === "OK") return // No navegar si está OK
if (status === "OK" || status === "INFO") return
onOpenChange(false) // Cerrar el modal
onOpenChange(false)
// Mapear categorías a tabs
const categoryToTab: Record<string, string> = {
storage: "storage",
disks: "storage",
@@ -176,55 +352,222 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
const targetTab = categoryToTab[categoryKey]
if (targetTab) {
// Disparar evento para cambiar tab
const event = new CustomEvent("changeTab", { detail: { tab: targetTab } })
window.dispatchEvent(event)
}
}
const handleAcknowledge = async (errorKey: string, e: React.MouseEvent) => {
e.stopPropagation() // Prevent navigation
console.log("[v0] Dismissing error:", errorKey)
e.stopPropagation()
setDismissingKey(errorKey)
try {
const response = await fetch(getApiUrl("/api/health/acknowledge"), {
const url = getApiUrl("/api/health/acknowledge")
const token = getAuthToken()
const headers: Record<string, string> = { "Content-Type": "application/json" }
if (token) {
headers["Authorization"] = `Bearer ${token}`
}
const response = await fetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
headers,
body: JSON.stringify({ error_key: errorKey }),
})
const responseData = await response.json().catch(() => ({}))
if (!response.ok) {
const errorData = await response.json()
console.error("[v0] Acknowledge failed:", errorData)
throw new Error(errorData.error || "Failed to acknowledge error")
throw new Error(responseData.error || `Failed to dismiss error (${response.status})`)
}
const result = await response.json()
console.log("[v0] Acknowledge success:", result)
// Refresh health data
await fetchHealthDetails()
// Optimistically update local state to avoid slow re-fetch
// Add the dismissed item to the local list immediately
if (responseData.result || responseData.success) {
const dismissedItem = {
error_key: errorKey,
category: responseData.result?.category || responseData.category || '',
severity: responseData.result?.original_severity || 'WARNING',
reason: 'Dismissed by user',
dismissed: true,
acknowledged_at: new Date().toISOString()
}
setDismissedItems(prev => [...prev, dismissedItem])
}
// Fetch fresh data in background (non-blocking)
fetchHealthDetails().catch(() => {})
} catch (err) {
console.error("[v0] Error acknowledging:", err)
alert("Failed to dismiss error. Please try again.")
console.error("Error dismissing:", err)
} finally {
setDismissingKey(null)
}
}
// Human-readable age of the last health check ("just now", "5 minutes ago",
// "2h 13m ago"); null when no timestamp is available yet.
const getTimeSinceCheck = () => {
  if (!healthData?.timestamp) return null
  const elapsedMs = Date.now() - new Date(healthData.timestamp).getTime()
  const minutes = Math.floor(elapsedMs / 60000)
  if (minutes < 1) return "just now"
  if (minutes === 1) return "1 minute ago"
  if (minutes < 60) return `${minutes} minutes ago`
  return `${Math.floor(minutes / 60)}h ${minutes % 60}m ago`
}
// Row background/border classes for a category by its effective status.
// Actionable statuses (CRITICAL/WARNING/UNKNOWN) also get a pointer cursor,
// since clicking those rows navigates to the relevant tab.
const getCategoryRowStyle = (status: string) => {
  switch (status?.toUpperCase()) {
    case "CRITICAL":
      return "bg-red-500/5 border-red-500/20 hover:bg-red-500/10 cursor-pointer"
    case "WARNING":
      return "bg-yellow-500/5 border-yellow-500/20 hover:bg-yellow-500/10 cursor-pointer"
    case "UNKNOWN":
      return "bg-amber-500/5 border-amber-500/20 hover:bg-amber-500/10 cursor-pointer"
    case "INFO":
      return "bg-blue-500/5 border-blue-500/20 hover:bg-blue-500/10"
    default:
      return "bg-card border-border hover:bg-muted/30"
  }
}
// Outline-variant badge classes keyed by status; empty string means the
// caller falls back to the badge's default styling.
const getOutlineBadgeStyle = (status: string) => {
  const styles: Record<string, string> = {
    OK: "border-green-500 text-green-500 bg-transparent",
    INFO: "border-blue-500 text-blue-500 bg-blue-500/5",
    WARNING: "border-yellow-500 text-yellow-500 bg-yellow-500/5",
    CRITICAL: "border-red-500 text-red-500 bg-red-500/5",
    UNKNOWN: "border-amber-400 text-amber-400 bg-amber-500/5",
  }
  return styles[status?.toUpperCase() ?? ""] ?? ""
}
// Map a raw backend check key to a friendly display label. Known keys use a
// curated table; anything else is prettified mechanically (snake_case and
// camelCase split into Title Case words).
const formatCheckLabel = (key: string): string => {
  const KNOWN_LABELS: Record<string, string> = {
    // CPU
    cpu_usage: "CPU Usage",
    cpu_temperature: "Temperature",
    // Memory
    ram_usage: "RAM Usage",
    swap_usage: "Swap Usage",
    // Disk I/O
    root_filesystem: "Root Filesystem",
    smart_health: "SMART Health",
    io_errors: "I/O Errors",
    zfs_pools: "ZFS Pools",
    lvm_volumes: "LVM Volumes",
    lvm_check: "LVM Status",
    // Network
    connectivity: "Connectivity",
    // VMs & CTs
    qmp_communication: "QMP Communication",
    container_startup: "Container Startup",
    vm_startup: "VM Startup",
    oom_killer: "OOM Killer",
    // Services
    cluster_mode: "Cluster Mode",
    // Logs (prefixed with log_)
    log_error_cascade: "Error Cascade",
    log_error_spike: "Error Spike",
    log_persistent_errors: "Persistent Errors",
    log_critical_errors: "Critical Errors",
    // Updates
    pve_version: "Proxmox VE Version",
    security_updates: "Security Updates",
    system_age: "System Age",
    pending_updates: "Pending Updates",
    kernel_pve: "Kernel / PVE",
    // Security
    uptime: "Uptime",
    certificates: "Certificates",
    login_attempts: "Login Attempts",
    fail2ban: "Fail2Ban",
    // Storage (Proxmox)
    proxmox_storages: "Proxmox Storages",
  }
  const preset = KNOWN_LABELS[key]
  if (preset) return preset
  // Fallback: underscores -> spaces, split camelCase, then capitalize words.
  const spaced = key.replace(/_/g, " ").replace(/([a-z])([A-Z])/g, "$1 $2")
  return spaced.replace(/\b\w/g, (c) => c.toUpperCase())
}
// Render the per-check rows for one expanded category. Checks flagged
// installed === false are hidden; dismissed checks render with an INFO icon
// and a "Dismissed" badge; dismissable non-OK checks get a Dismiss button
// wired to handleAcknowledge. Returns null when there is nothing to show.
// NOTE(review): categoryKey is currently unused in the body — confirm whether
// it is reserved for future use or can be dropped from the signature.
const renderChecks = (
  checks: Record<string, { status: string; detail: string; dismissable?: boolean; [key: string]: any }>,
  categoryKey: string
) => {
  if (!checks || Object.keys(checks).length === 0) return null
  return (
    <div className="mt-2 space-y-0.5">
      {Object.entries(checks)
        .filter(([, checkData]) => checkData.installed !== false)
        .map(([checkKey, checkData]) => {
          const isDismissable = checkData.dismissable === true
          // Treat a missing status as OK so the row renders without a button.
          const checkStatus = checkData.status?.toUpperCase() || "OK"
          return (
            <div
              key={checkKey}
              className="flex items-center justify-between gap-1.5 sm:gap-2 text-[10px] sm:text-xs py-1.5 px-2 sm:px-3 rounded-md hover:bg-muted/40 transition-colors"
            >
              <div className="flex items-start gap-1.5 sm:gap-2 min-w-0 flex-1">
                {/* Dismissed checks always show the blue INFO icon */}
                <span className="mt-0.5 shrink-0">{getStatusIcon(checkData.dismissed ? "INFO" : checkData.status, "sm")}</span>
                <span className="font-medium shrink-0">{formatCheckLabel(checkKey)}</span>
                <span className="text-muted-foreground break-words whitespace-pre-wrap min-w-0">{checkData.detail}</span>
                {checkData.dismissed && (
                  <Badge variant="outline" className="text-[9px] px-1 py-0 h-4 shrink-0 text-blue-400 border-blue-400/30">
                    Dismissed
                  </Badge>
                )}
              </div>
              <div className="flex items-center gap-1 sm:gap-1.5 shrink-0">
                {(checkStatus === "WARNING" || checkStatus === "CRITICAL" || checkStatus === "UNKNOWN") && isDismissable && !checkData.dismissed && (
                  <Button
                    size="sm"
                    variant="outline"
                    className="h-5 px-1 sm:px-1.5 shrink-0 hover:bg-red-500/10 hover:border-red-500/50 bg-transparent text-[10px]"
                    disabled={dismissingKey === (checkData.error_key || checkKey)}
                    onClick={(e) => {
                      e.stopPropagation()
                      handleAcknowledge(checkData.error_key || checkKey, e)
                    }}
                  >
                    {dismissingKey === (checkData.error_key || checkKey) ? (
                      <Loader2 className="h-3 w-3 animate-spin" />
                    ) : (
                      <>
                        <X className="h-3 w-3 sm:mr-0.5" />
                        <span className="hidden sm:inline">Dismiss</span>
                      </>
                    )}
                  </Button>
                )}
              </div>
            </div>
          )
        })}
    </div>
  )
}
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="max-w-3xl max-h-[85vh] overflow-y-auto">
<DialogContent className="max-w-3xl w-[calc(100vw-2rem)] sm:w-[95vw] max-h-[85vh] overflow-y-auto overflow-x-hidden p-4 sm:p-6">
<DialogHeader>
<div className="flex items-center justify-between gap-3">
<DialogTitle className="flex items-center gap-2 flex-1">
<Activity className="h-6 w-6" />
System Health Status
{healthData && <div className="ml-2">{getStatusBadge(healthData.overall)}</div>}
<DialogTitle className="flex items-center gap-2 flex-1 min-w-0">
<Activity className="h-5 w-5 sm:h-6 sm:w-6 shrink-0" />
<span className="truncate text-base sm:text-lg">System Health Status</span>
{healthData && <div className="shrink-0">{getStatusBadge(healthData.overall)}</div>}
</DialogTitle>
</div>
<DialogDescription>Detailed health checks for all system components</DialogDescription>
<DialogDescription className="flex flex-wrap items-center gap-x-2 gap-y-0.5 text-xs sm:text-sm">
<span>Detailed health checks for all system components</span>
{getTimeSinceCheck() && (
<span className="inline-flex items-center gap-1 text-xs text-muted-foreground">
<Clock className="h-3 w-3" />
{getTimeSinceCheck()}
</span>
)}
</DialogDescription>
</DialogHeader>
{loading && (
@@ -243,116 +586,249 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
{healthData && !loading && (
<div className="space-y-4">
{/* Overall Stats Summary */}
<div className="grid grid-cols-4 gap-3 p-4 rounded-lg bg-muted/30 border">
<div className={`grid gap-2 sm:gap-3 p-3 sm:p-4 rounded-lg bg-muted/30 border ${stats.info > 0 ? "grid-cols-5" : "grid-cols-4"}`}>
<div className="text-center">
<div className="text-2xl font-bold">{stats.total}</div>
<div className="text-xs text-muted-foreground">Total Checks</div>
<div className="text-lg sm:text-2xl font-bold">{stats.total}</div>
<div className="text-[10px] sm:text-xs text-muted-foreground">Total</div>
</div>
<div className="text-center">
<div className="text-2xl font-bold text-green-500">{stats.healthy}</div>
<div className="text-xs text-muted-foreground">Healthy</div>
<div className="text-lg sm:text-2xl font-bold text-green-500">{stats.healthy}</div>
<div className="text-[10px] sm:text-xs text-muted-foreground">Healthy</div>
</div>
{stats.info > 0 && (
<div className="text-center">
<div className="text-lg sm:text-2xl font-bold text-blue-500">{stats.info}</div>
<div className="text-[10px] sm:text-xs text-muted-foreground">Info</div>
</div>
)}
<div className="text-center">
<div className="text-lg sm:text-2xl font-bold text-yellow-500">{stats.warnings}</div>
<div className="text-[10px] sm:text-xs text-muted-foreground">Warn</div>
</div>
<div className="text-center">
<div className="text-2xl font-bold text-yellow-500">{stats.warnings}</div>
<div className="text-xs text-muted-foreground">Warnings</div>
<div className="text-lg sm:text-2xl font-bold text-red-500">{stats.critical}</div>
<div className="text-[10px] sm:text-xs text-muted-foreground">Critical</div>
</div>
{stats.unknown > 0 && (
<div className="text-center">
<div className="text-2xl font-bold text-red-500">{stats.critical}</div>
<div className="text-xs text-muted-foreground">Critical</div>
<div className="text-lg sm:text-2xl font-bold text-amber-400">{stats.unknown}</div>
<div className="text-[10px] sm:text-xs text-muted-foreground">Unknown</div>
</div>
)}
</div>
{healthData.summary && healthData.summary !== "All systems operational" && (
<div className="text-sm p-3 rounded-lg bg-muted/20 border">
<span className="font-medium text-foreground">{healthData.summary}</span>
<div className="text-xs sm:text-sm p-3 rounded-lg bg-muted/20 border overflow-hidden max-w-full">
<p className="font-medium text-foreground break-words whitespace-pre-wrap">{healthData.summary}</p>
</div>
)}
{/* Category List */}
<div className="space-y-2">
{CATEGORIES.map(({ key, label, Icon }) => {
const categoryData = healthData.details[key as keyof typeof healthData.details]
const status = categoryData?.status || "UNKNOWN"
const originalStatus = categoryData?.status || "UNKNOWN"
const status = getEffectiveStatus(key, originalStatus)
const reason = categoryData?.reason
const details = categoryData?.details
const checks = categoryData?.checks
const isExpanded = expandedCategories.has(key)
const hasChecks = checks && Object.keys(checks).length > 0
return (
<div
key={key}
onClick={() => handleCategoryClick(key, status)}
className={`flex items-start gap-3 p-3 rounded-lg border transition-colors ${
status === "OK"
? "bg-card border-border hover:bg-muted/30"
: status === "WARNING"
? "bg-yellow-500/5 border-yellow-500/20 hover:bg-yellow-500/10 cursor-pointer"
: status === "CRITICAL"
? "bg-red-500/5 border-red-500/20 hover:bg-red-500/10 cursor-pointer"
: "bg-muted/30 hover:bg-muted/50"
}`}
className={`rounded-lg border transition-colors overflow-hidden ${getCategoryRowStyle(status)}`}
>
<div className="mt-0.5 flex-shrink-0 flex items-center gap-2">
<Icon className="h-4 w-4 text-blue-500" />
{getStatusIcon(status)}
</div>
<div className="flex-1 min-w-0">
<div className="flex items-center justify-between gap-2 mb-1">
<p className="font-medium text-sm">{label}</p>
<Badge
variant="outline"
className={`shrink-0 text-xs ${
status === "OK"
? "border-green-500 text-green-500 bg-transparent"
: status === "WARNING"
? "border-yellow-500 text-yellow-500 bg-yellow-500/5"
: status === "CRITICAL"
? "border-red-500 text-red-500 bg-red-500/5"
: ""
}`}
>
{/* Clickable header row */}
<div
className="flex items-center gap-2 sm:gap-3 p-2 sm:p-3 cursor-pointer select-none overflow-hidden"
onClick={() => toggleCategory(key)}
>
<div className="shrink-0 flex items-center gap-1.5 sm:gap-2">
<Icon className="h-4 w-4 text-blue-500 hidden sm:block" />
{getStatusIcon(status)}
</div>
<div className="flex-1 min-w-0 overflow-hidden">
<div className="flex items-center gap-1.5 sm:gap-2">
<p className="font-medium text-xs sm:text-sm truncate">{label}</p>
{hasChecks && (
<span className="text-[10px] text-muted-foreground shrink-0">
({Object.values(checks).filter(c => c.installed !== false).length})
</span>
)}
</div>
{reason && !isExpanded && (
<p className="text-[10px] sm:text-xs text-muted-foreground mt-0.5 line-clamp-2 break-words">{reason}</p>
)}
</div>
<div className="flex items-center gap-1 sm:gap-2 shrink-0">
<Badge variant="outline" className={`text-[10px] sm:text-xs px-1.5 sm:px-2.5 ${getOutlineBadgeStyle(status)}`}>
{status}
</Badge>
<ChevronRight
className={`h-3.5 w-3.5 sm:h-4 sm:w-4 text-muted-foreground transition-transform duration-200 ${
isExpanded ? "rotate-90" : ""
}`}
/>
</div>
{reason && <p className="text-xs text-muted-foreground mt-1">{reason}</p>}
{details && typeof details === "object" && (
<div className="mt-2 space-y-1">
{Object.entries(details).map(([detailKey, detailValue]: [string, any]) => {
if (typeof detailValue === "object" && detailValue !== null) {
const isDismissable = detailValue.dismissable !== false
return (
<div
key={detailKey}
className="flex items-start justify-between gap-2 text-xs pl-3 border-l-2 border-muted py-1"
>
<div className="flex-1">
<span className="font-medium">{detailKey}:</span>
{detailValue.reason && (
<span className="ml-1 text-muted-foreground">{detailValue.reason}</span>
)}
</div>
{(status === "WARNING" || status === "CRITICAL") && isDismissable && (
<Button
size="sm"
variant="outline"
className="h-6 px-2 shrink-0 hover:bg-red-500/10 hover:border-red-500/50 bg-transparent"
onClick={(e) => handleAcknowledge(detailKey, e)}
>
<X className="h-3 w-3 mr-1" />
<span className="text-xs">Dismiss</span>
</Button>
)}
</div>
)
}
return null
})}
</div>
)}
</div>
{/* Expandable checks section */}
{isExpanded && (
<div className="border-t border-border/50 bg-muted/5 px-1.5 sm:px-2 py-1.5 overflow-hidden">
{reason && (
<div className="flex items-center justify-between gap-2 px-3 py-1.5 mb-1">
<p className="text-xs text-muted-foreground break-words whitespace-pre-wrap flex-1">{reason}</p>
{/* Show dismiss button for UNKNOWN status at category level when dismissable */}
{status === "UNKNOWN" && categoryData?.dismissable && !hasChecks && (
<Button
size="sm"
variant="outline"
className="h-5 px-1.5 shrink-0 hover:bg-red-500/10 hover:border-red-500/50 bg-transparent text-[10px]"
disabled={dismissingKey === `category_${key}`}
onClick={(e) => {
e.stopPropagation()
handleAcknowledge(`category_${key}_unknown`, e)
}}
>
{dismissingKey === `category_${key}` ? (
<Loader2 className="h-3 w-3 animate-spin" />
) : (
<>
<X className="h-3 w-3 sm:mr-0.5" />
<span className="hidden sm:inline">Dismiss</span>
</>
)}
</Button>
)}
</div>
)}
{hasChecks ? (
renderChecks(checks, key)
) : (
<div className="flex items-center gap-2 text-xs text-muted-foreground px-3 py-2">
<CheckCircle2 className="h-3.5 w-3.5 text-green-500" />
No issues detected
</div>
)}
</div>
)}
</div>
)
})}
</div>
{/* Dismissed Items Section -- hide items whose category has custom suppression */}
{(() => {
const customCats = new Set(customSuppressions.map(cs => cs.category))
const filteredDismissed = dismissedItems.filter(item => !customCats.has(item.category))
if (filteredDismissed.length === 0) return null
return (
<div className="space-y-2">
<div className="flex items-center gap-2 text-xs sm:text-sm font-medium text-muted-foreground pt-2">
<BellOff className="h-3.5 w-3.5 sm:h-4 sm:w-4" />
Dismissed Items ({filteredDismissed.length})
</div>
{filteredDismissed.map((item) => {
const catMeta = CATEGORIES.find(c => c.category === item.category || c.key === item.category)
const CatIcon = catMeta?.Icon || BellOff
const catLabel = catMeta?.label || item.category
const isPermanent = item.permanent || item.suppression_remaining_hours === -1
return (
<div
key={item.error_key}
className="flex items-start gap-2 sm:gap-3 p-2 sm:p-3 rounded-lg border bg-muted/10 border-muted opacity-75"
>
<div className="mt-0.5 shrink-0 flex items-center gap-1.5 sm:gap-2">
<CatIcon className="h-3.5 w-3.5 sm:h-4 sm:w-4 text-muted-foreground" />
</div>
<div className="flex-1 min-w-0">
<div className="flex items-start justify-between gap-2 mb-1">
<div className="min-w-0 flex-1 overflow-hidden">
<p className="font-medium text-xs sm:text-sm text-muted-foreground truncate">{catLabel}</p>
<p className="text-[10px] sm:text-xs text-muted-foreground/70 break-words line-clamp-2">{item.reason}</p>
</div>
<div className="flex items-center gap-1.5 shrink-0">
{isPermanent ? (
<Badge variant="outline" className="text-[9px] sm:text-xs border-amber-500/50 text-amber-500/70 bg-transparent whitespace-nowrap">
Permanent
</Badge>
) : (
<Badge variant="outline" className="text-[9px] sm:text-xs border-blue-500/50 text-blue-500/70 bg-transparent whitespace-nowrap">
Dismissed
</Badge>
)}
<Badge variant="outline" className={`text-[9px] sm:text-xs whitespace-nowrap ${getOutlineBadgeStyle(item.severity)}`}>
was {item.severity}
</Badge>
</div>
</div>
<p className="text-[10px] sm:text-xs text-muted-foreground flex items-center gap-1">
<Clock className="h-3 w-3" />
{isPermanent
? "Permanently suppressed"
: `Suppressed for ${
item.suppression_remaining_hours < 24
? `${Math.round(item.suppression_remaining_hours)}h`
: item.suppression_remaining_hours < 720
? `${Math.round(item.suppression_remaining_hours / 24)} days`
: `${Math.round(item.suppression_remaining_hours / 720)} month(s)`
} more`
}
</p>
</div>
</div>
)
})}
</div>
)
})()}
{/* Custom Suppression Settings Summary */}
{customSuppressions.length > 0 && (
<div className="space-y-2 pt-2">
<div className="flex items-center gap-2 text-xs sm:text-sm font-medium text-muted-foreground">
<Settings2 className="h-3.5 w-3.5 sm:h-4 sm:w-4" />
Custom Suppression Settings
</div>
<div className="rounded-lg border border-blue-500/20 bg-blue-500/5 p-2.5 sm:p-3">
<div className="space-y-1.5">
{customSuppressions.map((cs) => {
const catMeta = CATEGORIES.find(c => c.category === cs.category || c.key === cs.category || c.label === cs.label)
const CatIcon = catMeta?.Icon || Settings2
const durationLabel = cs.hours === -1
? "Permanent"
: cs.hours >= 8760
? `${Math.floor(cs.hours / 8760)} year(s)`
: cs.hours >= 720
? `${Math.floor(cs.hours / 720)} month(s)`
: cs.hours >= 168
? `${Math.floor(cs.hours / 168)} week(s)`
: cs.hours >= 72
? `${Math.floor(cs.hours / 24)} days`
: `${cs.hours}h`
return (
<div key={cs.key} className="flex items-center justify-between gap-2">
<div className="flex items-center gap-2 min-w-0">
<CatIcon className="h-3 w-3 sm:h-3.5 sm:w-3.5 text-blue-400/70 shrink-0" />
<span className="text-[11px] sm:text-xs text-blue-400/80 truncate">{cs.label}</span>
</div>
<Badge variant="outline" className="text-[9px] sm:text-[10px] border-blue-500/30 text-blue-400/80 bg-transparent shrink-0">
{durationLabel}
</Badge>
</div>
)
})}
</div>
<p className="text-[10px] text-muted-foreground/60 mt-2 pt-1.5 border-t border-blue-500/10">
Alerts in these categories are auto-suppressed when detected.
</p>
</div>
</div>
)}
{healthData.timestamp && (
<div className="text-xs text-muted-foreground text-center pt-2">
Last updated: {new Date(healthData.timestamp).toLocaleString()}
File diff suppressed because it is too large Load Diff
+18 -4
View File
@@ -7,7 +7,7 @@ import { Button } from "./ui/button"
import { Input } from "./ui/input"
import { Label } from "./ui/label"
import { Checkbox } from "./ui/checkbox"
import { Lock, User, AlertCircle, Server, Shield } from "lucide-react"
import { Lock, User, AlertCircle, Server, Shield, Eye, EyeOff } from "lucide-react"
import { getApiUrl } from "../lib/api-config"
import Image from "next/image"
@@ -21,6 +21,7 @@ export function Login({ onLogin }: LoginProps) {
const [totpCode, setTotpCode] = useState("")
const [requiresTotp, setRequiresTotp] = useState(false)
const [rememberMe, setRememberMe] = useState(false)
const [showPassword, setShowPassword] = useState(false)
const [error, setError] = useState("")
const [loading, setLoading] = useState(false)
@@ -161,14 +162,27 @@ export function Login({ onLogin }: LoginProps) {
<Lock className="absolute left-3 top-1/2 -translate-y-1/2 h-4 w-4 text-muted-foreground" />
<Input
id="login-password"
type="password"
type={showPassword ? "text" : "password"}
placeholder="Enter your password"
value={password}
onChange={(e) => setPassword(e.target.value)}
className="pl-10 text-base"
className="pl-10 pr-10 text-base"
disabled={loading}
autoComplete="current-password"
/>
<button
type="button"
onClick={() => setShowPassword(!showPassword)}
className="absolute right-3 top-1/2 -translate-y-1/2 text-muted-foreground hover:text-foreground transition-colors"
disabled={loading}
tabIndex={-1}
>
{showPassword ? (
<EyeOff className="h-4 w-4" />
) : (
<Eye className="h-4 w-4" />
)}
</button>
</div>
</div>
@@ -237,7 +251,7 @@ export function Login({ onLogin }: LoginProps) {
</form>
</div>
<p className="text-center text-sm text-muted-foreground">ProxMenux Monitor v1.0.2</p>
<p className="text-center text-sm text-muted-foreground">ProxMenux Monitor v1.2.0</p>
</div>
</div>
)
+857
View File
@@ -0,0 +1,857 @@
"use client"
import type React from "react"
import { useState, useEffect, useRef, useCallback } from "react"
import { Dialog, DialogContent, DialogTitle } from "@/components/ui/dialog"
import { Button } from "@/components/ui/button"
import {
Activity,
ArrowUp,
ArrowDown,
ArrowLeft,
ArrowRight,
CornerDownLeft,
GripHorizontal,
ChevronDown,
Search,
Send,
Lightbulb,
Terminal,
Trash2,
X,
} from "lucide-react"
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
DropdownMenuSeparator,
DropdownMenuLabel,
} from "@/components/ui/dropdown-menu"
import { DialogHeader, DialogDescription } from "@/components/ui/dialog"
import { Input } from "@/components/ui/input"
import { Dialog as SearchDialog, DialogContent as SearchDialogContent, DialogTitle as SearchDialogTitle } from "@/components/ui/dialog"
import "xterm/css/xterm.css"
import { API_PORT, fetchApi } from "@/lib/api-config"
// Props accepted by the LXC terminal modal dialog.
interface LxcTerminalModalProps {
  open: boolean        // whether the dialog is currently visible
  onClose: () => void  // invoked when the user dismisses the dialog
  vmid: number         // numeric Proxmox container ID (interpolated into `pct enter <vmid>`)
  vmName: string       // human-readable container name shown in the dialog header
}
// One search hit produced by the online command lookup
// (`/api/terminal/search-command`), rendered in the search dialog.
interface CheatSheetResult {
  command: string      // runnable command text
  description: string  // short explanation (may be empty — backend sends "" when absent)
  examples: string[]   // example invocations for this command
}
// Built-in offline cheat sheet. Used as the default list in the command
// search dialog and as the fallback when the online lookup fails
// (see the catch branch of the debounced search effect).
const proxmoxCommands = [
  { cmd: "ls -la", desc: "List all files with details" },
  { cmd: "cd /path/to/dir", desc: "Change directory" },
  { cmd: "cat filename", desc: "Display file contents" },
  { cmd: "grep 'pattern' file", desc: "Search for pattern in file" },
  { cmd: "find . -name 'file'", desc: "Find files by name" },
  { cmd: "df -h", desc: "Show disk usage" },
  { cmd: "du -sh *", desc: "Show directory sizes" },
  { cmd: "free -h", desc: "Show memory usage" },
  { cmd: "top", desc: "Show running processes" },
  { cmd: "ps aux | grep process", desc: "Find running process" },
  { cmd: "systemctl status service", desc: "Check service status" },
  { cmd: "systemctl restart service", desc: "Restart a service" },
  { cmd: "apt update && apt upgrade", desc: "Update packages" },
  { cmd: "apt install package", desc: "Install package" },
  { cmd: "tail -f /var/log/syslog", desc: "Follow log file" },
  { cmd: "chmod 755 file", desc: "Change file permissions" },
  { cmd: "chown user:group file", desc: "Change file owner" },
  { cmd: "tar -xzf file.tar.gz", desc: "Extract tar.gz archive" },
  { cmd: "docker ps", desc: "List running containers" },
  { cmd: "docker images", desc: "List Docker images" },
  { cmd: "ip addr show", desc: "Show IP addresses" },
  { cmd: "ping host", desc: "Test network connectivity" },
  { cmd: "curl -I url", desc: "Get HTTP headers" },
  { cmd: "history", desc: "Show command history" },
  { cmd: "clear", desc: "Clear terminal screen" },
]
/**
 * Build the WebSocket URL for the host terminal endpoint.
 *
 * On the server (no `window`) a localhost default is returned so SSR never
 * touches browser globals. In the browser, a standard HTTP(S) port implies a
 * reverse proxy fronting the app, so the socket shares the page origin;
 * any other port targets the backend API port directly.
 */
function getWebSocketUrl(): string {
  // SSR guard: window does not exist during server-side rendering.
  if (typeof window === "undefined") {
    return "ws://localhost:8008/ws/terminal"
  }
  const loc = window.location
  const scheme = loc.protocol === "https:" ? "wss:" : "ws:"
  const onStandardPort = ["", "80", "443"].includes(loc.port)
  return onStandardPort
    ? `${scheme}//${loc.hostname}/ws/terminal`
    : `${scheme}//${loc.hostname}:${API_PORT}/ws/terminal`
}
export function LxcTerminalModal({
open: isOpen,
onClose,
vmid,
vmName,
}: LxcTerminalModalProps) {
const termRef = useRef<any>(null)
const wsRef = useRef<WebSocket | null>(null)
const fitAddonRef = useRef<any>(null)
const terminalContainerRef = useRef<HTMLDivElement>(null)
const pingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null)
const [connectionStatus, setConnectionStatus] = useState<"connecting" | "online" | "offline">("connecting")
const [isMobile, setIsMobile] = useState(false)
const [isTablet, setIsTablet] = useState(false)
const isInsideLxcRef = useRef(false)
const outputBufferRef = useRef<string>("")
const [modalHeight, setModalHeight] = useState(500)
const [isResizing, setIsResizing] = useState(false)
const resizeBarRef = useRef<HTMLDivElement>(null)
const modalHeightRef = useRef(500)
// Search state
const [searchModalOpen, setSearchModalOpen] = useState(false)
const [searchQuery, setSearchQuery] = useState("")
const [filteredCommands, setFilteredCommands] = useState<Array<{ cmd: string; desc: string }>>(proxmoxCommands)
const [isSearching, setIsSearching] = useState(false)
const [searchResults, setSearchResults] = useState<CheatSheetResult[]>([])
const [useOnline, setUseOnline] = useState(true)
// Detect mobile/tablet
useEffect(() => {
  // Classify the viewport so touch-friendly controls can be shown below
  // the terminal (see showMobileControls).
  const checkDevice = () => {
    const width = window.innerWidth
    setIsMobile(width < 640)                  // phone: below the 640px breakpoint
    setIsTablet(width >= 640 && width < 1024) // tablet: 640–1023px
  }
  checkDevice() // classify once immediately on mount
  window.addEventListener("resize", checkDevice)
  return () => window.removeEventListener("resize", checkDevice)
}, [])
// Cleanup on close
useEffect(() => {
  // When the dialog closes: stop the heartbeat, drop the socket, dispose the
  // xterm instance, and reset all connection state so the next open starts clean.
  if (!isOpen) {
    if (pingIntervalRef.current) {
      clearInterval(pingIntervalRef.current)
      pingIntervalRef.current = null
    }
    if (wsRef.current) {
      wsRef.current.close()
      wsRef.current = null
    }
    if (termRef.current) {
      termRef.current.dispose() // releases xterm DOM nodes and listeners
      termRef.current = null
    }
    setConnectionStatus("connecting")
    isInsideLxcRef.current = false
    outputBufferRef.current = ""
  }
}, [isOpen])
// Initialize terminal
useEffect(() => {
  if (!isOpen) return
  // Small delay to ensure Dialog content is rendered
  const initTimeout = setTimeout(() => {
    if (!terminalContainerRef.current) return
    initTerminal()
  }, 100)
  // Dynamically import xterm (client-only library), create the terminal,
  // connect the WebSocket to the HOST shell, and auto-run `pct enter <vmid>`.
  const initTerminal = async () => {
    const [TerminalClass, FitAddonClass] = await Promise.all([
      import("xterm").then((mod) => mod.Terminal),
      import("xterm-addon-fit").then((mod) => mod.FitAddon),
    ])
    const fontSize = window.innerWidth < 768 ? 12 : 16 // smaller font on narrow screens
    const term = new TerminalClass({
      rendererType: "dom",
      fontFamily: '"Courier", "Courier New", "Liberation Mono", "DejaVu Sans Mono", monospace',
      fontSize: fontSize,
      lineHeight: 1,
      cursorBlink: true,
      scrollback: 2000,
      disableStdin: false,
      customGlyphs: true,
      fontWeight: "500",
      fontWeightBold: "700",
      theme: {
        background: "#000000",
        foreground: "#ffffff",
        cursor: "#ffffff",
        cursorAccent: "#000000",
        black: "#2e3436",
        red: "#cc0000",
        green: "#4e9a06",
        yellow: "#c4a000",
        blue: "#3465a4",
        magenta: "#75507b",
        cyan: "#06989a",
        white: "#d3d7cf",
        brightBlack: "#555753",
        brightRed: "#ef2929",
        brightGreen: "#8ae234",
        brightYellow: "#fce94f",
        brightBlue: "#729fcf",
        brightMagenta: "#ad7fa8",
        brightCyan: "#34e2e2",
        brightWhite: "#eeeeec",
      },
    })
    const fitAddon = new FitAddonClass()
    term.loadAddon(fitAddon)
    if (terminalContainerRef.current) {
      term.open(terminalContainerRef.current)
      fitAddon.fit()
    }
    termRef.current = term
    fitAddonRef.current = fitAddon
    // Connect WebSocket to host terminal
    const wsUrl = getWebSocketUrl()
    const ws = new WebSocket(wsUrl)
    wsRef.current = ws
    // Reset state for new connection
    isInsideLxcRef.current = false
    outputBufferRef.current = ""
    ws.onopen = () => {
      setConnectionStatus("online")
      // Start heartbeat ping (keeps proxies from dropping the idle socket)
      pingIntervalRef.current = setInterval(() => {
        if (ws.readyState === WebSocket.OPEN) {
          ws.send(JSON.stringify({ type: 'ping' }))
        } else {
          if (pingIntervalRef.current) {
            clearInterval(pingIntervalRef.current)
          }
        }
      }, 25000)
      // Sync terminal size
      fitAddon.fit()
      ws.send(JSON.stringify({
        type: "resize",
        cols: term.cols,
        rows: term.rows,
      }))
      // Auto-execute pct enter after connection is ready
      setTimeout(() => {
        if (ws.readyState === WebSocket.OPEN) {
          ws.send(`pct enter ${vmid}\r`)
        }
      }, 300)
    }
    ws.onerror = () => {
      setConnectionStatus("offline")
      term.writeln("\r\n\x1b[31m[ERROR] WebSocket connection error\x1b[0m")
    }
    ws.onclose = () => {
      setConnectionStatus("offline")
      if (pingIntervalRef.current) {
        clearInterval(pingIntervalRef.current)
      }
      term.writeln("\r\n\x1b[33m[INFO] Connection closed\x1b[0m")
    }
    // Forward every keystroke straight to the host shell.
    term.onData((data) => {
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(data)
      }
    })
    ws.onmessage = (event) => {
      // Filter out pong responses
      if (event.data === '{"type": "pong"}' || event.data === '{"type":"pong"}') {
        return
      }
      // Helper to strip ANSI escape codes for pattern matching
      const stripAnsi = (str: string) => str.replace(/\x1b\[[0-9;]*[a-zA-Z]/g, '')
      // Buffer output until we detect we're inside the LXC
      // pct enter always enters directly without login prompt when run as root
      // NOTE(review): if prompt detection never matches, buffered host output is
      // never flushed to the terminal — appears intentional, but worth confirming.
      if (!isInsideLxcRef.current) {
        outputBufferRef.current += event.data
        const buffer = outputBufferRef.current
        const cleanBuffer = stripAnsi(buffer)
        // Look for pct enter command followed by a new prompt
        const pctEnterMatch = cleanBuffer.match(/pct enter (\d+)\r?\n/)
        if (pctEnterMatch) {
          const afterPctEnter = cleanBuffer.substring(cleanBuffer.indexOf(pctEnterMatch[0]) + pctEnterMatch[0].length)
          // Extract the host name from the prompt BEFORE pct enter (e.g., "root@amd")
          const hostPromptMatch = cleanBuffer.match(/@([a-zA-Z0-9_-]+).*pct enter/)
          const hostName = hostPromptMatch ? hostPromptMatch[1] : null
          // Look for a new prompt after pct enter that ends with # or $
          // This works for both bash (user@host:~#) and ash/Alpine ([user@host /]#)
          const promptMatch = afterPctEnter.match(/[@\[]([a-zA-Z0-9_-]+)[^\r\n]*[#$]\s*$/)
          if (promptMatch) {
            const lxcHostname = promptMatch[1]
            // If we found a prompt with a DIFFERENT hostname than the Proxmox host,
            // we're inside the LXC container
            if (!hostName || lxcHostname !== hostName) {
              isInsideLxcRef.current = true
              // Find the original prompt with ANSI codes to display it properly
              const afterPctEnterWithAnsi = buffer.substring(buffer.indexOf('pct enter') + pctEnterMatch[0].length)
              // Write the LXC prompt (last line with # or $)
              const lastPromptMatch = afterPctEnterWithAnsi.match(/[^\r\n]*[#$]\s*$/)
              if (lastPromptMatch) {
                term.write(lastPromptMatch[0])
              }
              // Detect if this is Alpine/ash shell by checking prompt format
              // Alpine uses: [root@hostname ~]# or [root@hostname /]#
              // Other distros use: root@hostname:/# or root@hostname:~#
              const isAlpine = afterPctEnter.match(/\[[^\]]+@[^\]]+\s+[^\]]*\][#$]/)
              if (isAlpine) {
                // Send an extra Enter ONLY for Alpine containers (ash shell)
                // This forces the prompt to refresh properly
                setTimeout(() => {
                  if (ws.readyState === WebSocket.OPEN) {
                    ws.send('\r')
                  }
                }, 100)
              }
              return
            }
          }
        }
      } else {
        // Already inside LXC, write directly
        term.write(event.data)
      }
    }
  }
  // Tear down the pending init timer, heartbeat, socket and terminal on
  // unmount or when isOpen/vmid change (refs are nulled by the close effect).
  return () => {
    clearTimeout(initTimeout)
    if (pingIntervalRef.current) {
      clearInterval(pingIntervalRef.current)
    }
    if (wsRef.current) {
      wsRef.current.close()
    }
    if (termRef.current) {
      termRef.current.dispose()
    }
  }
}, [isOpen, vmid])
// Resize handling
useEffect(() => {
  // Re-fit xterm whenever the modal height changes (drag-resize) and tell the
  // backend PTY the new geometry so line wrapping stays correct.
  if (termRef.current && fitAddonRef.current && isOpen) {
    // Delay lets the DOM settle at the new height before measuring.
    setTimeout(() => {
      fitAddonRef.current?.fit()
      if (wsRef.current?.readyState === WebSocket.OPEN) {
        wsRef.current.send(JSON.stringify({
          type: "resize",
          cols: termRef.current.cols,
          rows: termRef.current.rows,
        }))
      }
    }, 100)
  }
}, [modalHeight, isOpen])
// Resize bar handlers
// Begin a drag-resize of the modal (mouse or touch on the grip bar);
// snapshots the current height so the move handler has a stable baseline.
const handleResizeStart = useCallback((e: React.MouseEvent | React.TouchEvent) => {
  e.preventDefault() // avoid text selection / page scroll while dragging
  setIsResizing(true)
  modalHeightRef.current = modalHeight
}, [modalHeight])
useEffect(() => {
  // Only attach document-level drag listeners while a resize is in progress.
  if (!isResizing) return
  const handleMove = (e: MouseEvent | TouchEvent) => {
    // Normalize pointer Y across mouse and touch events.
    const clientY = 'touches' in e ? e.touches[0].clientY : e.clientY
    const windowHeight = window.innerHeight
    // Height is measured from the pointer down to the bottom edge (20px margin).
    const newHeight = windowHeight - clientY - 20
    // Clamp: never smaller than 300px, never taller than viewport minus 100px.
    const clampedHeight = Math.max(300, Math.min(windowHeight - 100, newHeight))
    modalHeightRef.current = clampedHeight
    setModalHeight(clampedHeight)
  }
  const handleEnd = () => {
    setIsResizing(false)
  }
  document.addEventListener("mousemove", handleMove)
  document.addEventListener("mouseup", handleEnd)
  document.addEventListener("touchmove", handleMove)
  document.addEventListener("touchend", handleEnd)
  return () => {
    document.removeEventListener("mousemove", handleMove)
    document.removeEventListener("mouseup", handleEnd)
    document.removeEventListener("touchmove", handleMove)
    document.removeEventListener("touchend", handleEnd)
  }
}, [isResizing])
// Send key helpers for mobile/tablet
// Send a raw byte / escape sequence to the remote shell, silently dropped
// if the socket is not open. Backs the on-screen keys below.
const sendKey = useCallback((key: string) => {
  if (wsRef.current?.readyState === WebSocket.OPEN) {
    wsRef.current.send(key)
  }
}, [])
const sendEsc = useCallback(() => sendKey("\x1b"), [sendKey])        // ESC
const sendTab = useCallback(() => sendKey("\t"), [sendKey])          // Tab (completion)
const sendArrowUp = useCallback(() => sendKey("\x1b[A"), [sendKey])  // ANSI cursor up
const sendArrowDown = useCallback(() => sendKey("\x1b[B"), [sendKey])   // ANSI cursor down
const sendArrowLeft = useCallback(() => sendKey("\x1b[D"), [sendKey])   // ANSI cursor left
const sendArrowRight = useCallback(() => sendKey("\x1b[C"), [sendKey])  // ANSI cursor right
const sendEnter = useCallback(() => sendKey("\r"), [sendKey])        // carriage return
const sendCtrlC = useCallback(() => sendKey("\x03"), [sendKey]) // Ctrl+C
// Search effect - debounced search with cheat.sh
useEffect(() => {
  // Query the backend command-search endpoint; on any failure fall back to a
  // substring filter over the built-in proxmoxCommands list.
  const searchCheatSh = async (query: string) => {
    if (!query.trim()) {
      // Blank query: reset to the full offline list.
      setSearchResults([])
      setFilteredCommands(proxmoxCommands)
      return
    }
    try {
      setIsSearching(true)
      const searchEndpoint = `/api/terminal/search-command?q=${encodeURIComponent(query)}`
      const data = await fetchApi<{ success: boolean; examples: any[] }>(searchEndpoint, {
        method: "GET",
        signal: AbortSignal.timeout(10000), // abort slow lookups after 10s
      })
      if (!data.success || !data.examples || data.examples.length === 0) {
        throw new Error("No examples found") // jump to the offline fallback
      }
      const formattedResults: CheatSheetResult[] = data.examples.map((example: any) => ({
        command: example.command,
        description: example.description || "",
        examples: [example.command],
      }))
      setUseOnline(true)
      setSearchResults(formattedResults)
    } catch (error) {
      // Offline fallback: case-insensitive match on command text or description.
      const filtered = proxmoxCommands.filter(
        (item) =>
          item.cmd.toLowerCase().includes(query.toLowerCase()) ||
          item.desc.toLowerCase().includes(query.toLowerCase()),
      )
      setFilteredCommands(filtered)
      setSearchResults([])
      setUseOnline(false)
    } finally {
      setIsSearching(false)
    }
  }
  // Debounce keystrokes by 800ms; queries shorter than 2 chars just reset.
  const debounce = setTimeout(() => {
    if (searchQuery && searchQuery.length >= 2) {
      searchCheatSh(searchQuery)
    } else {
      setSearchResults([])
      setFilteredCommands(proxmoxCommands)
    }
  }, 800)
  return () => clearTimeout(debounce)
}, [searchQuery])
// Clear the visible terminal screen (xterm's clear; does not touch the remote shell).
const handleClear = useCallback(() => {
  if (termRef.current) {
    termRef.current.clear()
  }
}, [])
// Insert a command picked from the search dialog into the remote shell
// (not executed — no trailing newline is sent), then close the dialog.
const sendToTerminal = useCallback((command: string) => {
  if (wsRef.current?.readyState === WebSocket.OPEN) {
    wsRef.current.send(command)
    // Brief delay so the send completes before the dialog unmounts.
    setTimeout(() => {
      setSearchModalOpen(false)
    }, 100)
  }
}, [])
  // Touch devices lack hardware ESC/TAB/arrow keys, so render the on-screen key bar.
  const showMobileControls = isMobile || isTablet
  return (
    <Dialog open={isOpen} onOpenChange={(open) => !open && onClose()}>
      <DialogContent
        className="max-w-4xl w-[95vw] p-0 gap-0 bg-black border-border overflow-hidden flex flex-col"
        style={{ height: `${modalHeight}px` }}
        hideClose
      >
        {/* Resize bar — drag handle that adjusts modalHeight (mouse + touch) */}
        <div
          ref={resizeBarRef}
          className="h-3 w-full cursor-ns-resize flex items-center justify-center bg-zinc-900 hover:bg-zinc-800 transition-colors touch-none"
          onMouseDown={handleResizeStart}
          onTouchStart={handleResizeStart}
        >
          <GripHorizontal className="h-4 w-4 text-zinc-500" />
        </div>
        {/* Header — target VM name/id plus Search and Clear actions (disabled while offline) */}
        <div className="flex items-center justify-between px-4 py-2 bg-zinc-900 border-b border-zinc-800">
          <DialogTitle className="text-sm font-medium text-white">
            Terminal: {vmName} (ID: {vmid})
          </DialogTitle>
          <div className="flex gap-2">
            <Button
              onClick={() => setSearchModalOpen(true)}
              variant="outline"
              size="sm"
              disabled={connectionStatus !== "online"}
              className="h-8 gap-2 bg-blue-600/20 hover:bg-blue-600/30 border-blue-600/50 text-blue-400 disabled:opacity-50"
            >
              <Search className="h-4 w-4" />
              <span className="hidden sm:inline">Search</span>
            </Button>
            <Button
              onClick={handleClear}
              variant="outline"
              size="sm"
              disabled={connectionStatus !== "online"}
              className="h-8 gap-2 bg-yellow-600/20 hover:bg-yellow-600/30 border-yellow-600/50 text-yellow-400 disabled:opacity-50"
            >
              <Trash2 className="h-4 w-4" />
              <span className="hidden sm:inline">Clear</span>
            </Button>
          </div>
        </div>
        {/* Terminal container — xterm.js mounts into this div */}
        <div className="flex-1 overflow-hidden bg-black p-1">
          <div
            ref={terminalContainerRef}
            className="w-full h-full"
            style={{ minHeight: "200px" }}
          />
        </div>
        {/* Mobile/Tablet control buttons */}
        {showMobileControls && (
          <div className="px-2 py-2 bg-zinc-900 border-t border-zinc-800">
            <div className="flex items-center justify-center gap-1.5">
              <Button
                variant="outline"
                size="sm"
                onClick={sendEsc}
                className="h-8 px-2 text-xs bg-zinc-800 border-zinc-700 text-zinc-300"
              >
                ESC
              </Button>
              <Button
                variant="outline"
                size="sm"
                onClick={sendTab}
                className="h-8 px-2 text-xs bg-zinc-800 border-zinc-700 text-zinc-300"
              >
                TAB
              </Button>
              <Button
                variant="outline"
                size="sm"
                onClick={sendArrowUp}
                className="h-8 w-8 p-0 bg-zinc-800 border-zinc-700"
              >
                <ArrowUp className="h-4 w-4" />
              </Button>
              <Button
                variant="outline"
                size="sm"
                onClick={sendArrowDown}
                className="h-8 w-8 p-0 bg-zinc-800 border-zinc-700"
              >
                <ArrowDown className="h-4 w-4" />
              </Button>
              <Button
                variant="outline"
                size="sm"
                onClick={sendArrowLeft}
                className="h-8 w-8 p-0 bg-zinc-800 border-zinc-700"
              >
                <ArrowLeft className="h-4 w-4" />
              </Button>
              <Button
                variant="outline"
                size="sm"
                onClick={sendArrowRight}
                className="h-8 w-8 p-0 bg-zinc-800 border-zinc-700"
              >
                <ArrowRight className="h-4 w-4" />
              </Button>
              <Button
                variant="outline"
                size="sm"
                onClick={sendEnter}
                className="h-8 px-2 text-xs bg-blue-600/20 border-blue-600/50 text-blue-400 hover:bg-blue-600/30"
              >
                <CornerDownLeft className="h-4 w-4 mr-1" />
                Enter
              </Button>
              {/* Dropdown of common control sequences sent as raw bytes */}
              <DropdownMenu>
                <DropdownMenuTrigger asChild>
                  <Button
                    variant="outline"
                    size="sm"
                    className="h-8 px-2 text-xs bg-zinc-800 border-zinc-700 text-zinc-300 gap-1"
                  >
                    Ctrl
                    <ChevronDown className="h-3 w-3" />
                  </Button>
                </DropdownMenuTrigger>
                <DropdownMenuContent align="end" className="w-48">
                  <DropdownMenuLabel className="text-xs text-muted-foreground">Control Sequences</DropdownMenuLabel>
                  <DropdownMenuSeparator />
                  <DropdownMenuItem onSelect={() => sendKey("\x03")}>
                    <span className="font-mono text-xs mr-2">Ctrl+C</span>
                    <span className="text-muted-foreground text-xs">Cancel/Interrupt</span>
                  </DropdownMenuItem>
                  <DropdownMenuItem onSelect={() => sendKey("\x18")}>
                    <span className="font-mono text-xs mr-2">Ctrl+X</span>
                    <span className="text-muted-foreground text-xs">Exit (nano)</span>
                  </DropdownMenuItem>
                  <DropdownMenuItem onSelect={() => sendKey("\x12")}>
                    <span className="font-mono text-xs mr-2">Ctrl+R</span>
                    <span className="text-muted-foreground text-xs">Search history</span>
                  </DropdownMenuItem>
                </DropdownMenuContent>
              </DropdownMenu>
            </div>
          </div>
        )}
        {/* Status bar at bottom */}
        <div className="flex items-center justify-between px-4 py-2 bg-zinc-900 border-t border-zinc-800">
          <div className="flex items-center gap-3">
            <Activity className="h-5 w-5 text-blue-500" />
            {/* Connection dot: green = online, pulsing yellow = connecting, red = offline */}
            <div
              className={`w-2 h-2 rounded-full ${
                connectionStatus === "online"
                  ? "bg-green-500"
                  : connectionStatus === "connecting"
                    ? "bg-yellow-500 animate-pulse"
                    : "bg-red-500"
              }`}
            />
            <span className="text-xs text-zinc-400 capitalize">{connectionStatus}</span>
          </div>
          <Button
            onClick={onClose}
            variant="outline"
            size="sm"
            className="h-8 gap-2 bg-red-600/20 hover:bg-red-600/30 border-red-600/50 text-red-400"
          >
            <X className="h-4 w-4" />
            <span className="hidden sm:inline">Close</span>
          </Button>
        </div>
      </DialogContent>
      {/* Search Commands Modal */}
      <SearchDialog open={searchModalOpen} onOpenChange={setSearchModalOpen}>
        <SearchDialogContent className="max-w-3xl max-h-[85vh] overflow-hidden flex flex-col">
          <DialogHeader className="flex flex-row items-center justify-between space-y-0 pb-4 border-b border-zinc-800">
            <SearchDialogTitle className="text-xl font-semibold">Search Commands</SearchDialogTitle>
            <div className="flex items-center gap-2">
              {/* Green = cheat.sh reachable (online search), red = local-only fallback */}
              <div
                className={`w-2 h-2 rounded-full ${useOnline ? "bg-green-500" : "bg-red-500"}`}
                title={useOnline ? "Online - Using cheat.sh API" : "Offline - Using local commands"}
              />
            </div>
          </DialogHeader>
          <DialogDescription className="sr-only">Search for Linux commands</DialogDescription>
          <div className="space-y-4">
            {/* Search input — value drives the debounced cheat.sh effect */}
            <div className="relative">
              <Search className="absolute left-3 top-1/2 -translate-y-1/2 w-4 h-4 text-zinc-500" />
              <Input
                placeholder="Search commands... (e.g., tar, docker, systemctl)"
                value={searchQuery}
                onChange={(e) => setSearchQuery(e.target.value)}
                className="pl-10 bg-zinc-900 border-zinc-700 focus:border-blue-500 focus:ring-1 focus:ring-blue-500 text-base"
                autoCapitalize="none"
                autoComplete="off"
                autoCorrect="off"
                spellCheck={false}
              />
            </div>
            {isSearching && (
              <div className="text-center py-4 text-zinc-400">
                <div className="animate-spin inline-block w-6 h-6 border-2 border-current border-t-transparent rounded-full mb-2" />
                <p className="text-sm">Searching cheat.sh...</p>
              </div>
            )}
            {/* Results: online cheat.sh results > filtered local commands > full local list > empty state */}
            <div className="flex-1 overflow-y-auto space-y-2 pr-2 max-h-[50vh]">
              {searchResults.length > 0 ? (
                <>
                  {searchResults.map((result, index) => (
                    <div
                      key={index}
                      className="p-4 rounded-lg border border-zinc-700 bg-zinc-800/50 hover:border-zinc-600 transition-colors"
                    >
                      {result.description && (
                        <p className="text-xs text-zinc-400 mb-2 leading-relaxed"># {result.description}</p>
                      )}
                      <div
                        onClick={() => sendToTerminal(result.command)}
                        className="flex items-start justify-between gap-2 cursor-pointer group hover:bg-zinc-800/50 rounded p-2 -m-2"
                      >
                        <code className="text-sm text-blue-400 font-mono break-all flex-1">{result.command}</code>
                        <Send className="h-4 w-4 text-zinc-600 group-hover:text-blue-400 flex-shrink-0 mt-0.5 transition-colors" />
                      </div>
                    </div>
                  ))}
                  <div className="text-center py-2">
                    <p className="text-xs text-zinc-500">
                      <Lightbulb className="inline-block w-3 h-3 mr-1" />
                      Powered by cheat.sh
                    </p>
                  </div>
                </>
              ) : filteredCommands.length > 0 && !useOnline ? (
                filteredCommands.map((item, index) => (
                  <div
                    key={index}
                    onClick={() => sendToTerminal(item.cmd)}
                    className="p-3 rounded-lg border border-zinc-700 bg-zinc-800/50 hover:bg-zinc-800 hover:border-blue-500 cursor-pointer transition-colors"
                  >
                    <div className="flex items-start justify-between gap-2">
                      <div className="flex-1 min-w-0">
                        <code className="text-sm text-blue-400 font-mono break-all">{item.cmd}</code>
                        <p className="text-xs text-zinc-400 mt-1">{item.desc}</p>
                      </div>
                      <Button
                        onClick={(e) => {
                          e.stopPropagation()
                          sendToTerminal(item.cmd)
                        }}
                        size="sm"
                        variant="ghost"
                        className="shrink-0 h-7 px-2 text-xs"
                      >
                        <Send className="h-3 w-3 mr-1" />
                        Send
                      </Button>
                    </div>
                  </div>
                ))
              ) : !isSearching && !searchQuery && !useOnline ? (
                proxmoxCommands.map((item, index) => (
                  <div
                    key={index}
                    onClick={() => sendToTerminal(item.cmd)}
                    className="p-3 rounded-lg border border-zinc-700 bg-zinc-800/50 hover:bg-zinc-800 hover:border-blue-500 cursor-pointer transition-colors"
                  >
                    <div className="flex items-start justify-between gap-2">
                      <div className="flex-1 min-w-0">
                        <code className="text-sm text-blue-400 font-mono break-all">{item.cmd}</code>
                        <p className="text-xs text-zinc-400 mt-1">{item.desc}</p>
                      </div>
                      <Button
                        onClick={(e) => {
                          e.stopPropagation()
                          sendToTerminal(item.cmd)
                        }}
                        size="sm"
                        variant="ghost"
                        className="shrink-0 h-7 px-2 text-xs"
                      >
                        <Send className="h-3 w-3 mr-1" />
                        Send
                      </Button>
                    </div>
                  </div>
                ))
              ) : !isSearching ? (
                <div className="text-center py-12 space-y-4">
                  {searchQuery ? (
                    <>
                      <Search className="w-12 h-12 text-zinc-600 mx-auto" />
                      <div>
                        <p className="text-zinc-400 font-medium">{"No results found for \""}{searchQuery}{"\""}</p>
                        <p className="text-xs text-zinc-500 mt-1">Try a different command or check your spelling</p>
                      </div>
                    </>
                  ) : (
                    <>
                      <Terminal className="w-12 h-12 text-zinc-600 mx-auto" />
                      <div>
                        <p className="text-zinc-400 font-medium mb-2">Search for any command</p>
                        <div className="text-sm text-zinc-500 space-y-1">
                          <p>Try searching for:</p>
                          <div className="flex flex-wrap justify-center gap-2 mt-2">
                            {["tar", "grep", "docker", "systemctl", "curl"].map((cmd) => (
                              <code
                                key={cmd}
                                onClick={() => setSearchQuery(cmd)}
                                className="px-2 py-1 bg-zinc-800 rounded text-blue-400 cursor-pointer hover:bg-zinc-700"
                              >
                                {cmd}
                              </code>
                            ))}
                          </div>
                        </div>
                      </div>
                      {useOnline && (
                        <div className="flex items-center justify-center gap-2 text-xs text-zinc-600 mt-4">
                          <Lightbulb className="w-3 h-3" />
                          <span>Powered by cheat.sh</span>
                        </div>
                      )}
                    </>
                  )}
                </div>
              ) : null}
            </div>
            <div className="pt-2 border-t border-zinc-800 flex items-center justify-between text-xs text-zinc-500">
              <div className="flex items-center gap-2">
                <Lightbulb className="w-3 h-3" />
                <span>Tip: Search for any Linux command</span>
              </div>
              {useOnline && searchResults.length > 0 && <span className="text-zinc-600">Powered by cheat.sh</span>}
            </div>
          </div>
        </SearchDialogContent>
      </SearchDialog>
    </Dialog>
  )
}
+99 -30
View File
@@ -4,12 +4,14 @@ import { useEffect, useState } from "react"
import { Card, CardContent, CardHeader, CardTitle } from "./ui/card"
import { Badge } from "./ui/badge"
import { Dialog, DialogContent, DialogHeader, DialogTitle, DialogDescription } from "./ui/dialog"
import { Wifi, Activity, Network, Router, AlertCircle, Zap } from 'lucide-react'
import { Wifi, Activity, Network, Router, AlertCircle, Zap, Timer } from 'lucide-react'
import useSWR from "swr"
import { NetworkTrafficChart } from "./network-traffic-chart"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
import { fetchApi } from "../lib/api-config"
import { formatNetworkTraffic, getNetworkUnit } from "../lib/format-network"
import { LatencyDetailModal } from "./latency-detail-modal"
import { AreaChart, Area, LineChart, Line, ResponsiveContainer, YAxis } from "recharts"
interface NetworkData {
interfaces: NetworkInterface[]
@@ -140,8 +142,8 @@ export function NetworkMetrics() {
error,
isLoading,
} = useSWR<NetworkData>("/api/network", fetcher, {
refreshInterval: 53000,
revalidateOnFocus: false,
refreshInterval: 15000,
revalidateOnFocus: true,
revalidateOnReconnect: true,
})
@@ -150,8 +152,19 @@ export function NetworkMetrics() {
const [modalTimeframe, setModalTimeframe] = useState<"hour" | "day" | "week" | "month" | "year">("day")
const [networkTotals, setNetworkTotals] = useState<{ received: number; sent: number }>({ received: 0, sent: 0 })
const [interfaceTotals, setInterfaceTotals] = useState<{ received: number; sent: number }>({ received: 0, sent: 0 })
const [latencyModalOpen, setLatencyModalOpen] = useState(false)
const [networkUnit, setNetworkUnit] = useState<"Bytes" | "Bits">(() => getNetworkUnit())
// Latency history for sparkline (last hour)
const { data: latencyData } = useSWR<{
data: Array<{ timestamp: number; value: number }>
stats: { min: number; max: number; avg: number; current: number }
target: string
}>("/api/network/latency/history?target=gateway&timeframe=hour",
(url: string) => fetchApi(url),
{ refreshInterval: 60000, revalidateOnFocus: false }
)
useEffect(() => {
setNetworkUnit(getNetworkUnit())
@@ -177,10 +190,13 @@ export function NetworkMetrics() {
if (isLoading) {
return (
<div className="space-y-6">
<div className="text-center py-8">
<div className="text-lg font-medium text-foreground mb-2">Loading network data...</div>
<div className="flex flex-col items-center justify-center min-h-[400px] gap-4">
<div className="relative">
<div className="h-12 w-12 rounded-full border-2 border-muted"></div>
<div className="absolute inset-0 h-12 w-12 rounded-full border-2 border-transparent border-t-primary animate-spin"></div>
</div>
<div className="text-sm font-medium text-foreground">Loading network data...</div>
<p className="text-xs text-muted-foreground">Scanning interfaces, bridges and traffic</p>
</div>
)
}
@@ -327,48 +343,95 @@ export function NetworkMetrics() {
</CardContent>
</Card>
{/* Merged Network Config & Health Card */}
<Card className="bg-card border-border">
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
<CardTitle className="text-sm font-medium text-muted-foreground">Network Configuration</CardTitle>
<Network className="h-4 w-4 text-muted-foreground" />
<CardTitle className="text-sm font-medium text-muted-foreground">Network Status</CardTitle>
<Badge variant="outline" className={healthColor}>
{healthStatus}
</Badge>
</CardHeader>
<CardContent>
<div className="space-y-2">
<div className="flex flex-col">
<div className="flex items-center justify-between">
<span className="text-xs text-muted-foreground">Hostname</span>
<span className="text-sm font-medium text-foreground truncate">{hostname}</span>
<span className="text-xs font-medium text-foreground truncate max-w-[120px]">{hostname}</span>
</div>
<div className="flex flex-col">
<span className="text-xs text-muted-foreground">Domain</span>
<span className="text-sm font-medium text-foreground truncate">{domain}</span>
</div>
<div className="flex flex-col">
<div className="flex items-center justify-between">
<span className="text-xs text-muted-foreground">Primary DNS</span>
<span className="text-sm font-medium text-foreground truncate">{primaryDNS}</span>
<span className="text-xs font-medium text-foreground font-mono">{primaryDNS}</span>
</div>
<div className="flex items-center justify-between">
<span className="text-xs text-muted-foreground">Packet Loss</span>
<span className="text-xs font-medium text-foreground">{avgPacketLoss}%</span>
</div>
<div className="flex items-center justify-between">
<span className="text-xs text-muted-foreground">Errors</span>
<span className="text-xs font-medium text-foreground">{totalErrors}</span>
</div>
</div>
</CardContent>
</Card>
<Card className="bg-card border-border">
{/* Latency Card with Sparkline */}
<Card
className="bg-card border-border cursor-pointer hover:bg-muted/50 transition-colors"
onClick={() => setLatencyModalOpen(true)}
>
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
<CardTitle className="text-sm font-medium text-muted-foreground">Network Health</CardTitle>
<Activity className="h-4 w-4 text-muted-foreground" />
<CardTitle className="text-sm font-medium text-muted-foreground">Network Latency</CardTitle>
<Timer className="h-4 w-4 text-muted-foreground" />
</CardHeader>
<CardContent>
<Badge variant="outline" className={healthColor}>
{healthStatus}
</Badge>
<div className="flex flex-col gap-1 mt-2 text-xs">
<div className="flex items-center justify-between">
<span className="text-muted-foreground">Packet Loss:</span>
<span className="font-medium text-foreground">{avgPacketLoss}%</span>
</div>
<div className="flex items-center justify-between">
<span className="text-muted-foreground">Errors:</span>
<span className="font-medium text-foreground">{totalErrors}</span>
<div className="flex items-center justify-between mb-2">
<div className="text-xl lg:text-2xl font-bold text-foreground">
{latencyData?.stats?.current ?? 0} <span className="text-sm font-normal text-muted-foreground">ms</span>
</div>
<Badge
variant="outline"
className={
(latencyData?.stats?.current ?? 0) < 50
? "bg-green-500/10 text-green-500 border-green-500/20"
: (latencyData?.stats?.current ?? 0) < 100
? "bg-green-500/10 text-green-500 border-green-500/20"
: (latencyData?.stats?.current ?? 0) < 200
? "bg-yellow-500/10 text-yellow-500 border-yellow-500/20"
: "bg-red-500/10 text-red-500 border-red-500/20"
}
>
{(latencyData?.stats?.current ?? 0) < 50 ? "Excellent" :
(latencyData?.stats?.current ?? 0) < 100 ? "Good" :
(latencyData?.stats?.current ?? 0) < 200 ? "Fair" : "Poor"}
</Badge>
</div>
{/* Sparkline */}
{latencyData?.data && latencyData.data.length > 0 && (
<div className="h-[40px] w-full">
<ResponsiveContainer width="100%" height="100%">
<AreaChart data={latencyData.data.slice(-30)} margin={{ top: 2, right: 0, left: 0, bottom: 0 }}>
<defs>
<linearGradient id="latencySparkGradient" x1="0" y1="0" x2="0" y2="1">
<stop offset="0%" stopColor="#3b82f6" stopOpacity={0.4} />
<stop offset="100%" stopColor="#3b82f6" stopOpacity={0.05} />
</linearGradient>
</defs>
<Area
type="monotone"
dataKey="value"
stroke="#3b82f6"
strokeWidth={1.5}
fill="url(#latencySparkGradient)"
dot={false}
isAnimationActive={false}
baseValue="dataMin"
/>
</AreaChart>
</ResponsiveContainer>
</div>
)}
<p className="text-xs text-muted-foreground mt-1">
Avg: {latencyData?.stats?.avg ?? 0}ms | Max: {latencyData?.stats?.max ?? 0}ms
</p>
</CardContent>
</Card>
</div>
@@ -1088,6 +1151,12 @@ export function NetworkMetrics() {
)}
</DialogContent>
</Dialog>
{/* Latency Detail Modal */}
<LatencyDetailModal
open={latencyModalOpen}
onOpenChange={setLatencyModalOpen}
/>
</div>
)
}
+35 -20
View File
@@ -78,6 +78,10 @@ export function NodeMetricsCharts() {
memory: { memoryTotal: true, memoryUsed: true, memoryZfsArc: true, memoryFree: true },
})
// Check if ZFS ARC or Free memory have any non-zero values to decide if we should show them
const hasZfsArc = data.some(d => d.memoryZfsArc > 0)
const hasMemoryFree = data.some(d => d.memoryFree > 0)
useEffect(() => {
console.log("[v0] NodeMetricsCharts component mounted")
fetchMetrics()
@@ -194,6 +198,11 @@ export function NodeMetricsCharts() {
return (
<div className="flex justify-center gap-4 pb-2 flex-wrap">
{payload.map((entry: any, index: number) => {
// For memory chart, hide ZFS ARC and Free from legend if they have no data
if (chartType === "memory") {
if (entry.dataKey === "memoryZfsArc" && !hasZfsArc) return null
if (entry.dataKey === "memoryFree" && !hasMemoryFree) return null
}
const isVisible = visibleLines[chartType][entry.dataKey as keyof (typeof visibleLines)[typeof chartType]]
return (
<div
@@ -428,26 +437,32 @@ export function NodeMetricsCharts() {
name="Used"
hide={!visibleLines.memory.memoryUsed}
/>
<Area
type="monotone"
dataKey="memoryZfsArc"
stroke="#f59e0b"
strokeWidth={2}
fill="#f59e0b"
fillOpacity={0.3}
name="ZFS ARC"
hide={!visibleLines.memory.memoryZfsArc}
/>
<Area
type="monotone"
dataKey="memoryFree"
stroke="#06b6d4"
strokeWidth={2}
fill="#06b6d4"
fillOpacity={0.3}
name="Available"
hide={!visibleLines.memory.memoryFree}
/>
{/* Only show ZFS ARC if there's data */}
{hasZfsArc && (
<Area
type="monotone"
dataKey="memoryZfsArc"
stroke="#f59e0b"
strokeWidth={2}
fill="#f59e0b"
fillOpacity={0.3}
name="ZFS ARC"
hide={!visibleLines.memory.memoryZfsArc}
/>
)}
{/* Only show Free memory if there's data */}
{hasMemoryFree && (
<Area
type="monotone"
dataKey="memoryFree"
stroke="#06b6d4"
strokeWidth={2}
fill="#06b6d4"
fillOpacity={0.3}
name="Free"
hide={!visibleLines.memory.memoryFree}
/>
)}
</AreaChart>
</ResponsiveContainer>
</CardContent>
File diff suppressed because it is too large Load Diff
+165 -26
View File
@@ -11,6 +11,7 @@ import { VirtualMachines } from "./virtual-machines"
import Hardware from "./hardware"
import { SystemLogs } from "./system-logs"
import { Settings } from "./settings"
import { Security } from "./security"
import { OnboardingCarousel } from "./onboarding-carousel"
import { HealthStatusModal } from "./health-status-modal"
import { ReleaseNotesModal, useVersionCheck } from "./release-notes-modal"
@@ -31,6 +32,8 @@ import {
FileText,
SettingsIcon,
Terminal,
ShieldCheck,
Info,
} from "lucide-react"
import Image from "next/image"
import { ThemeToggle } from "./theme-toggle"
@@ -76,11 +79,77 @@ export function ProxmoxDashboard() {
const [componentKey, setComponentKey] = useState(0)
const [mobileMenuOpen, setMobileMenuOpen] = useState(false)
const [activeTab, setActiveTab] = useState("overview")
const [infoCount, setInfoCount] = useState(0)
const [updateAvailable, setUpdateAvailable] = useState(false)
const [showNavigation, setShowNavigation] = useState(true)
const [lastScrollY, setLastScrollY] = useState(0)
const [showHealthModal, setShowHealthModal] = useState(false)
const { showReleaseNotes, setShowReleaseNotes } = useVersionCheck()
// Category keys for health info count calculation
const HEALTH_CATEGORY_KEYS = [
{ key: "cpu", category: "temperature" },
{ key: "memory", category: "memory" },
{ key: "storage", category: "storage" },
{ key: "disks", category: "disks" },
{ key: "network", category: "network" },
{ key: "vms", category: "vms" },
{ key: "services", category: "pve_services" },
{ key: "logs", category: "logs" },
{ key: "updates", category: "updates" },
{ key: "security", category: "security" },
]
// Fetch ProxMenux update status
const fetchUpdateStatus = useCallback(async () => {
try {
const response = await fetchApi("/api/proxmenux/update-status")
if (response?.success && response?.update_available) {
const { stable, beta } = response.update_available
setUpdateAvailable(stable || beta)
}
} catch (error) {
// Silently fail - updateAvailable will remain false
}
}, [])
// Fetch health info count independently (for initial load and refresh)
const fetchHealthInfoCount = useCallback(async () => {
try {
const response = await fetchApi("/api/health/full")
let calculatedInfoCount = 0
if (response && response.health?.details) {
// Get categories that have dismissed items (these become INFO)
const customCats = new Set((response.custom_suppressions || []).map((cs: { category: string }) => cs.category))
const filteredDismissed = (response.dismissed || []).filter((item: { category: string }) => !customCats.has(item.category))
const categoriesWithDismissed = new Set<string>()
filteredDismissed.forEach((item: { category: string }) => {
const catMeta = HEALTH_CATEGORY_KEYS.find(c => c.category === item.category || c.key === item.category)
if (catMeta) {
categoriesWithDismissed.add(catMeta.key)
}
})
// Count effective INFO categories (original INFO + OK categories with dismissed)
HEALTH_CATEGORY_KEYS.forEach(({ key }) => {
const cat = response.health.details[key as keyof typeof response.health.details]
if (cat) {
const originalStatus = cat.status?.toUpperCase()
// Count as INFO if: originally INFO OR (originally OK and has dismissed items)
if (originalStatus === "INFO" || (originalStatus === "OK" && categoriesWithDismissed.has(key))) {
calculatedInfoCount++
}
}
})
}
setInfoCount(calculatedInfoCount)
} catch (error) {
// Silently fail - infoCount will remain at 0
}
}, [])
const fetchSystemData = useCallback(async () => {
try {
const data: FlaskSystemInfo = await fetchApi("/api/system-info")
@@ -108,7 +177,7 @@ export function ProxmoxDashboard() {
})
setIsServerConnected(true)
} catch (error) {
console.error("[v0] Failed to fetch system data from Flask server:", error)
// Expected to fail in v0 preview (no Flask server)
setIsServerConnected(false)
setSystemStatus((prev) => ({
@@ -123,22 +192,28 @@ export function ProxmoxDashboard() {
}, [])
useEffect(() => {
// Siempre fetch inicial
fetchSystemData()
// Siempre fetch inicial
fetchSystemData()
fetchHealthInfoCount()
fetchUpdateStatus()
// En overview: cada 30 segundos para actualización frecuente del estado de salud
// En otras tabs: cada 60 segundos para reducir carga
let interval: ReturnType<typeof setInterval> | null = null
let healthInterval: ReturnType<typeof setInterval> | null = null
if (activeTab === "overview") {
interval = setInterval(fetchSystemData, 30000) // 30 segundos
healthInterval = setInterval(fetchHealthInfoCount, 30000) // Also refresh info count
} else {
interval = setInterval(fetchSystemData, 60000) // 60 segundos
healthInterval = setInterval(fetchHealthInfoCount, 60000) // Also refresh info count
}
return () => {
if (interval) clearInterval(interval)
if (healthInterval) clearInterval(healthInterval)
}
}, [fetchSystemData, activeTab])
}, [fetchSystemData, fetchHealthInfoCount, fetchUpdateStatus, activeTab])
useEffect(() => {
const handleChangeTab = (event: CustomEvent) => {
@@ -153,10 +228,28 @@ export function ProxmoxDashboard() {
window.removeEventListener("changeTab", handleChangeTab as EventListener)
}
}, [])
// Auto-refresh terminal on mobile devices
// This fixes the issue where terminal doesn't connect properly on mobile/VPN
useEffect(() => {
if (activeTab === "terminal") {
const isMobileDevice = window.innerWidth < 768 ||
('ontouchstart' in window && navigator.maxTouchPoints > 0)
if (isMobileDevice) {
// Delay to allow initial connection attempt, then refresh to ensure proper connection
const timeoutId = setTimeout(() => {
setComponentKey(prev => prev + 1)
}, 500)
return () => clearTimeout(timeoutId)
}
}
}, [activeTab])
useEffect(() => {
const handleHealthStatusUpdate = (event: CustomEvent) => {
const { status } = event.detail
const { status, infoCount: newInfoCount } = event.detail
let healthStatus: "healthy" | "warning" | "critical"
if (status === "CRITICAL") {
@@ -171,6 +264,11 @@ export function ProxmoxDashboard() {
...prev,
status: healthStatus,
}))
// Update info count (INFO categories + dismissed items)
if (typeof newInfoCount === "number") {
setInfoCount(newInfoCount)
}
}
window.addEventListener("healthStatusUpdated", handleHealthStatusUpdate as EventListener)
@@ -265,8 +363,10 @@ export function ProxmoxDashboard() {
return "Terminal"
case "logs":
return "System Logs"
case "settings":
return "Settings"
case "security":
return "Security"
case "settings":
return "Settings"
default:
return "Navigation Menu"
}
@@ -309,14 +409,13 @@ export function ProxmoxDashboard() {
<div className="flex items-center space-x-2 md:space-x-3 min-w-0">
<div className="w-16 h-16 md:w-10 md:h-10 relative flex items-center justify-center bg-primary/10 flex-shrink-0">
<Image
src="/images/proxmenux-logo.png"
src={updateAvailable ? "/images/proxmenux_update-logo.png" : "/images/proxmenux-logo.png"}
alt="ProxMenux Logo"
width={64}
height={64}
className="object-contain md:w-10 md:h-10"
priority
onError={(e) => {
console.log("[v0] Logo failed to load, using fallback icon")
const target = e.target as HTMLImageElement
target.style.display = "none"
const fallback = target.parentElement?.querySelector(".fallback-icon")
@@ -346,10 +445,18 @@ export function ProxmoxDashboard() {
</div>
</div>
<Badge variant="outline" className={statusColor}>
{statusIcon}
<span className="ml-1 capitalize">{systemStatus.status}</span>
</Badge>
<div className="flex items-center gap-2">
<Badge variant="outline" className={statusColor}>
{statusIcon}
<span className="ml-1 capitalize">{systemStatus.status}</span>
</Badge>
{systemStatus.status === "healthy" && infoCount > 0 && (
<Badge variant="outline" className="bg-blue-500/10 text-blue-500 border-blue-500/20">
<Info className="h-4 w-4" />
<span className="ml-1">{infoCount} info</span>
</Badge>
)}
</div>
<div className="text-sm text-muted-foreground whitespace-nowrap">
Uptime: {systemStatus.uptime || "N/A"}
@@ -375,11 +482,18 @@ export function ProxmoxDashboard() {
</div>
{/* Mobile Actions */}
<div className="flex lg:hidden items-center gap-2">
<Badge variant="outline" className={`${statusColor} text-xs px-2`}>
{statusIcon}
<span className="ml-1 capitalize hidden sm:inline">{systemStatus.status}</span>
</Badge>
<div className="flex lg:hidden items-start gap-2 pt-2">
<div className="flex flex-col items-end gap-1">
<Badge variant="outline" className={`${statusColor} text-xs px-2`}>
{statusIcon}
</Badge>
{systemStatus.status === "healthy" && infoCount > 0 && (
<Badge variant="outline" className="bg-blue-500/10 text-blue-500 border-blue-500/20 text-xs px-2">
<Info className="h-4 w-4" />
<span className="ml-1">{infoCount}</span>
</Badge>
)}
</div>
<Button
variant="ghost"
@@ -389,12 +503,12 @@ export function ProxmoxDashboard() {
refreshData()
}}
disabled={isRefreshing}
className="h-8 w-8 p-0"
className="h-8 w-8 p-0 -mt-1"
>
<RefreshCw className={`h-4 w-4 ${isRefreshing ? "animate-spin" : ""}`} />
</Button>
<div onClick={(e) => e.stopPropagation()}>
<div onClick={(e) => e.stopPropagation()} className="-mt-1">
<ThemeToggle />
</div>
</div>
@@ -409,14 +523,14 @@ export function ProxmoxDashboard() {
<div
className={`sticky z-40 bg-background
top-[120px] md:top-[76px]
transition-all duration-700 ease-[cubic-bezier(0.4,0,0.2,1)]
top-[120px] lg:top-[76px]
transition-all duration-700 ease-in-out
${showNavigation ? "translate-y-0 opacity-100" : "-translate-y-[120%] opacity-0 pointer-events-none"}
`}
>
<div className="container mx-auto px-4 md:px-6 pt-4 md:pt-6">
<div className="container mx-auto px-4 lg:px-6 pt-4 lg:pt-6">
<Tabs value={activeTab} onValueChange={setActiveTab} className="space-y-0">
<TabsList className="hidden md:grid w-full grid-cols-8 bg-card border border-border">
<TabsList className="hidden lg:grid w-full grid-cols-9 bg-card border border-border">
<TabsTrigger
value="overview"
className="data-[state=active]:bg-blue-500 data-[state=active]:text-white data-[state=active]:rounded-md"
@@ -459,6 +573,12 @@ export function ProxmoxDashboard() {
>
Terminal
</TabsTrigger>
<TabsTrigger
value="security"
className="data-[state=active]:bg-blue-500 data-[state=active]:text-white data-[state=active]:rounded-md"
>
Security
</TabsTrigger>
<TabsTrigger
value="settings"
className="data-[state=active]:bg-blue-500 data-[state=active]:text-white data-[state=active]:rounded-md"
@@ -468,7 +588,7 @@ export function ProxmoxDashboard() {
</TabsList>
<Sheet open={mobileMenuOpen} onOpenChange={setMobileMenuOpen}>
<div className="md:hidden">
<div className="lg:hidden">
<SheetTrigger asChild>
<Button
variant="outline"
@@ -588,6 +708,21 @@ export function ProxmoxDashboard() {
<Terminal className="h-5 w-5" />
<span>Terminal</span>
</Button>
<Button
variant="ghost"
onClick={() => {
setActiveTab("security")
setMobileMenuOpen(false)
}}
className={`w-full justify-start gap-3 ${
activeTab === "security"
? "bg-blue-500/10 text-blue-500 border-l-4 border-blue-500 rounded-l-none"
: ""
}`}
>
<ShieldCheck className="h-5 w-5" />
<span>Security</span>
</Button>
<Button
variant="ghost"
onClick={() => {
@@ -640,13 +775,17 @@ export function ProxmoxDashboard() {
<TerminalPanel key={`terminal-${componentKey}`} />
</TabsContent>
<TabsContent value="security" className="space-y-4 md:space-y-6 mt-0">
<Security key={`security-${componentKey}`} />
</TabsContent>
<TabsContent value="settings" className="space-y-4 md:space-y-6 mt-0">
<Settings />
</TabsContent>
</Tabs>
<footer className="mt-8 md:mt-12 pt-4 md:pt-6 border-t border-border text-center text-xs md:text-sm text-muted-foreground">
<p className="font-medium mb-2">ProxMenux Monitor v1.0.2</p>
<p className="font-medium mb-2">ProxMenux Monitor v1.2.0</p>
<p>
<a
href="https://ko-fi.com/macrimi"
+52 -29
View File
@@ -3,10 +3,10 @@
import { useState, useEffect } from "react"
import { Button } from "./ui/button"
import { Dialog, DialogContent, DialogTitle } from "./ui/dialog"
import { X, Sparkles, Link2, Shield, Zap, HardDrive, Gauge, Wrench, Settings } from "lucide-react"
import { X, Sparkles, Thermometer, Terminal, Activity, HardDrive, Bell, Shield, Globe, Cpu, Zap } from "lucide-react"
import { Checkbox } from "./ui/checkbox"
const APP_VERSION = "1.0.2" // Sync with AppImage/package.json
const APP_VERSION = "1.2.0" // Sync with AppImage/package.json
interface ReleaseNote {
date: string
@@ -18,6 +18,32 @@ interface ReleaseNote {
}
export const CHANGELOG: Record<string, ReleaseNote> = {
"1.1.2-beta": {
date: "March 18, 2026",
changes: {
added: [
"Temperature & Latency Charts - Real-time visual monitoring with interactive graphs",
"WebSocket Terminal - Direct access to Proxmox host and LXC containers terminal",
"AI-Enhanced Notifications - Intelligent message formatting with multi-provider support (OpenAI, Groq, Anthropic, Ollama)",
"Security Section - Comprehensive security settings for ProxMenux and Proxmox",
"VPN Integration - Easy Tailscale VPN installation and configuration",
"GPU Scripts - Installation utilities for Intel, AMD and NVIDIA drivers",
"Disk Observations System - Track and document disk health observations over time",
"Enhanced Health Monitor - Configurable monitoring with advanced settings panel",
],
changed: [
"Improved overall performance with optimized data fetching",
"Notifications now support rich formatting with contextual emojis",
"Health monitor now configurable from Settings section",
"Better Proxmox service name translation for non-expert users",
],
fixed: [
"Fixed notification message truncation for large backup reports",
"Improved disk error deduplication to prevent repeated alerts",
"Corrected AI provider base URL handling for OpenAI-compatible APIs",
],
},
},
"1.0.1": {
date: "November 11, 2025",
changes: {
@@ -25,23 +51,16 @@ export const CHANGELOG: Record<string, ReleaseNote> = {
"Proxy Support - Access ProxMenux through reverse proxies with full functionality",
"Authentication System - Secure your dashboard with password protection",
"PCIe Link Speed Detection - View NVMe drive connection speeds and detect performance issues",
"Enhanced Storage Display - Better formatting for disk sizes (auto-converts GB to TB when needed)",
"SATA/SAS Information - View detailed interface information for all storage devices",
"Two-Factor Authentication (2FA) - Enhanced security with TOTP support",
"Health Monitoring System - Comprehensive system health checks with dismissible warnings",
"Release Notes Modal - Automatic notification of new features and improvements",
],
changed: [
"Optimized VM & LXC page - Reduced CPU usage by 85% through intelligent caching",
"Storage metrics now separate local and remote storage for clarity",
"Update warnings now appear only after 365 days instead of 30 days",
"API intervals staggered to distribute server load (23s and 37s)",
],
fixed: [
"Fixed dark mode text contrast issues in various components",
"Corrected storage calculation discrepancies between Overview and Storage pages",
"Resolved JSON stringify error in VM control actions",
"Improved IP address fetching for LXC containers",
],
},
},
@@ -63,32 +82,36 @@ export const CHANGELOG: Record<string, ReleaseNote> = {
const CURRENT_VERSION_FEATURES = [
{
icon: <Link2 className="h-5 w-5" />,
text: "Proxy Support - Access ProxMenux through reverse proxies with full functionality",
icon: <Thermometer className="h-5 w-5" />,
text: "Temperature & Latency Charts - Real-time visual monitoring with interactive historical graphs",
},
{
icon: <Terminal className="h-5 w-5" />,
text: "WebSocket Terminal - Direct terminal access to Proxmox host and LXC containers from the browser",
},
{
icon: <Activity className="h-5 w-5" />,
text: "Enhanced Health Monitor - Configurable health monitoring with advanced settings and disk observations",
},
{
icon: <Bell className="h-5 w-5" />,
text: "AI-Enhanced Notifications - Intelligent message formatting with support for OpenAI, Groq, Anthropic and Ollama",
},
{
icon: <Shield className="h-5 w-5" />,
text: "Two-Factor Authentication (2FA) - Enhanced security with TOTP support for login protection",
text: "Security Section - Comprehensive security configuration for both ProxMenux and Proxmox systems",
},
{
icon: <Globe className="h-5 w-5" />,
text: "VPN Integration - Easy Tailscale VPN installation and configuration for secure remote access",
},
{
icon: <Cpu className="h-5 w-5" />,
text: "GPU Drivers - Installation scripts for Intel, AMD and NVIDIA graphics drivers and utilities",
},
{
icon: <Zap className="h-5 w-5" />,
text: "Performance Improvements - Optimized loading times and reduced CPU usage across the application",
},
{
icon: <HardDrive className="h-5 w-5" />,
text: "Storage Enhancements - Improved disk space consumption display with local and remote storage separation",
},
{
icon: <Gauge className="h-5 w-5" />,
text: "PCIe Link Speed Detection - View NVMe drive connection speeds and identify performance bottlenecks",
},
{
icon: <Wrench className="h-5 w-5" />,
text: "Hardware Page Improvements - Enhanced hardware information display with detailed PCIe and interface data",
},
{
icon: <Settings className="h-5 w-5" />,
text: "New Settings Page - Centralized configuration for authentication, optimizations, and system preferences",
text: "Performance Improvements - Optimized data fetching and reduced resource consumption",
},
]
+68 -28
View File
@@ -15,7 +15,16 @@ import {
ArrowRight,
CornerDownLeft,
GripHorizontal,
ChevronDown,
} from "lucide-react"
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
DropdownMenuSeparator,
DropdownMenuLabel,
} from "@/components/ui/dropdown-menu"
import "xterm/css/xterm.css"
import { API_PORT } from "@/lib/api-config"
@@ -34,6 +43,8 @@ interface ScriptTerminalModalProps {
scriptPath: string
title: string
description: string
scriptName?: string
params?: Record<string, string>
}
export function ScriptTerminalModal({
@@ -42,6 +53,7 @@ export function ScriptTerminalModal({
scriptPath,
title,
description,
params = { EXECUTION_MODE: "web" },
}: ScriptTerminalModalProps) {
const termRef = useRef<any>(null)
const wsRef = useRef<WebSocket | null>(null)
@@ -68,6 +80,12 @@ export function ScriptTerminalModal({
const modalHeightRef = useRef(600)
const terminalContainerRef = useRef<HTMLDivElement>(null)
const paramsRef = useRef(params)
// Keep paramsRef updated with latest params
useEffect(() => {
paramsRef.current = params
}, [params])
const attemptReconnect = useCallback(() => {
if (!isOpen || isComplete || reconnectAttemptsRef.current >= 3) {
@@ -104,13 +122,11 @@ export function ScriptTerminalModal({
}
}, 30000)
const initMessage = {
script_path: scriptPath,
params: {
EXECUTION_MODE: "web",
},
}
ws.send(JSON.stringify(initMessage))
const initMessage = {
script_path: scriptPath,
params: paramsRef.current,
}
ws.send(JSON.stringify(initMessage))
setTimeout(() => {
if (fitAddonRef.current && termRef.current && ws.readyState === WebSocket.OPEN) {
@@ -122,6 +138,11 @@ export function ScriptTerminalModal({
}
ws.onmessage = (event) => {
// Filter out pong responses from heartbeat
if (event.data === '{"type": "pong"}' || event.data === '{"type":"pong"}') {
return
}
try {
const msg = JSON.parse(event.data)
if (msg.type === "web_interaction" && msg.interaction) {
@@ -268,11 +289,8 @@ export function ScriptTerminalModal({
const initMessage = {
script_path: scriptPath,
params: {
EXECUTION_MODE: "web",
},
params: paramsRef.current,
}
ws.send(JSON.stringify(initMessage))
setTimeout(() => {
@@ -291,6 +309,11 @@ export function ScriptTerminalModal({
}
ws.onmessage = (event) => {
// Filter out pong responses from heartbeat - don't display in terminal
if (event.data === '{"type": "pong"}' || event.data === '{"type":"pong"}') {
return
}
try {
const msg = JSON.parse(event.data)
@@ -641,13 +664,13 @@ export function ScriptTerminalModal({
ref={resizeBarRef}
onMouseDown={handleResizeStart}
onTouchStart={handleResizeStart}
className={`h-2 w-full cursor-row-resize transition-colors flex items-center justify-center group relative ${
className={`h-4 w-full cursor-row-resize transition-colors flex items-center justify-center group relative ${
isResizing ? "bg-blue-500" : "bg-zinc-800 hover:bg-blue-600"
}`}
style={{ touchAction: "none" }}
>
<GripHorizontal
className={`h-4 w-4 transition-colors pointer-events-none ${
className={`h-5 w-5 transition-colors pointer-events-none ${
isResizing ? "text-white" : "text-zinc-600 group-hover:text-white"
}`}
/>
@@ -736,22 +759,39 @@ export function ScriptTerminalModal({
}}
variant="outline"
size="sm"
className="h-8 px-2.5 text-xs bg-zinc-800 hover:bg-zinc-700 border-zinc-700 text-white"
className="h-8 px-2.5 text-xs bg-blue-600/20 hover:bg-blue-600/30 border-blue-600/50 text-blue-400"
>
<CornerDownLeft className="h-4 w-4" />
</Button>
<Button
onPointerDown={(e) => {
e.preventDefault()
e.stopPropagation()
sendCommand("\x03")
}}
variant="outline"
size="sm"
className="h-8 px-2 text-xs bg-zinc-800 hover:bg-zinc-700 border-zinc-700 text-white min-w-[65px]"
>
CTRL+C
<CornerDownLeft className="h-4 w-4 mr-1" />
Enter
</Button>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="outline"
size="sm"
className="h-8 px-2 text-xs bg-zinc-800 hover:bg-zinc-700 border-zinc-700 text-white min-w-[65px] gap-1"
>
Ctrl
<ChevronDown className="h-3 w-3" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="w-48">
<DropdownMenuLabel className="text-xs text-muted-foreground">Control Sequences</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuItem onSelect={() => sendCommand("\x03")}>
<span className="font-mono text-xs mr-2">Ctrl+C</span>
<span className="text-muted-foreground text-xs">Cancel/Interrupt</span>
</DropdownMenuItem>
<DropdownMenuItem onSelect={() => sendCommand("\x18")}>
<span className="font-mono text-xs mr-2">Ctrl+X</span>
<span className="text-muted-foreground text-xs">Exit (nano)</span>
</DropdownMenuItem>
<DropdownMenuItem onSelect={() => sendCommand("\x12")}>
<span className="font-mono text-xs mr-2">Ctrl+R</span>
<span className="text-muted-foreground text-xs">Search history</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
)}
@@ -786,7 +826,7 @@ export function ScriptTerminalModal({
<Button
onClick={handleCloseModal}
variant="outline"
className="bg-red-600 hover:bg-red-700 border-red-500 text-white"
className="bg-red-600/20 hover:bg-red-600/30 border-red-600/50 text-red-400"
>
Close
</Button>
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
+100 -241
View File
@@ -30,16 +30,6 @@ import {
import { useState, useEffect, useMemo } from "react"
import { API_PORT, fetchApi } from "@/lib/api-config"
interface Log {
timestamp: string
level: string
service: string
message: string
source: string
pid?: string
hostname?: string
}
interface Backup {
volid: string
storage: string
@@ -76,6 +66,7 @@ interface SystemLog {
timestamp: string
level: string
service: string
unit?: string
message: string
source: string
pid?: string
@@ -86,6 +77,7 @@ interface CombinedLogEntry {
timestamp: string
level: string
service: string
unit?: string
message: string
source: string
pid?: string
@@ -108,161 +100,73 @@ export function SystemLogs() {
const [serviceFilter, setServiceFilter] = useState("all")
const [activeTab, setActiveTab] = useState("logs")
const [displayedLogsCount, setDisplayedLogsCount] = useState(50) // Increased from 500 to 50 for initial load, will use pagination
const [displayedLogsCount, setDisplayedLogsCount] = useState(100)
const [selectedLog, setSelectedLog] = useState<SystemLog | null>(null)
const [selectedEvent, setSelectedEvent] = useState<Event | null>(null)
const [selectedBackup, setSelectedBackup] = useState<Backup | null>(null)
const [selectedNotification, setSelectedNotification] = useState<Notification | null>(null) // Added
const [selectedNotification, setSelectedNotification] = useState<Notification | null>(null)
const [isLogModalOpen, setIsLogModalOpen] = useState(false)
const [isEventModalOpen, setIsEventModalOpen] = useState(false)
const [isBackupModalOpen, setIsBackupModalOpen] = useState(false)
const [isNotificationModalOpen, setIsNotificationModalOpen] = useState(false) // Added
const [isNotificationModalOpen, setIsNotificationModalOpen] = useState(false)
const [isMobileMenuOpen, setIsMobileMenuOpen] = useState(false)
const [dateFilter, setDateFilter] = useState("1") // Changed from "now" to "1" to load 1 day by default
const [dateFilter, setDateFilter] = useState("1")
const [customDays, setCustomDays] = useState("1")
const [refreshCounter, setRefreshCounter] = useState(0)
const getApiUrl = (endpoint: string) => {
if (typeof window !== "undefined") {
const { protocol, hostname, port } = window.location
const isStandardPort = port === "" || port === "80" || port === "443"
if (isStandardPort) {
return endpoint
} else {
return `${protocol}//${hostname}:${API_PORT}${endpoint}`
}
}
// This part might not be strictly necessary if only running client-side, but good for SSR safety
// In a real SSR scenario, you'd need to handle API_PORT differently
const protocol = typeof window !== "undefined" ? window.location.protocol : "http:" // Defaulting to http for SSR safety
const hostname = typeof window !== "undefined" ? window.location.hostname : "localhost" // Defaulting to localhost for SSR safety
return `${protocol}//${hostname}:${API_PORT}${endpoint}`
}
// Single unified useEffect for all data loading
// Fires on mount, when filters change, or when refresh is triggered
useEffect(() => {
fetchAllData()
}, [])
// CHANGE: Simplified useEffect - always fetch logs with date filter (no more "now" option)
useEffect(() => {
console.log("[v0] Date filter changed:", dateFilter, "Custom days:", customDays)
setLoading(true)
fetchSystemLogs()
.then((newLogs) => {
console.log("[v0] Loaded logs for date filter:", dateFilter, "Count:", newLogs.length)
console.log("[v0] First log:", newLogs[0])
setLogs(newLogs)
setLoading(false)
})
.catch((err) => {
console.error("[v0] Error loading logs:", err)
setLoading(false)
})
}, [dateFilter, customDays])
useEffect(() => {
console.log("[v0] Level or service filter changed:", levelFilter, serviceFilter)
if (levelFilter !== "all" || serviceFilter !== "all") {
setLoading(true)
fetchSystemLogs()
.then((newLogs) => {
console.log(
"[v0] Loaded logs for filters - Level:",
levelFilter,
"Service:",
serviceFilter,
"Count:",
newLogs.length,
)
setLogs(newLogs)
setLoading(false)
})
.catch((err) => {
console.error("[v0] Error loading logs:", err)
setLoading(false)
})
} else {
// Only reload all data if we're on "now" and all filters are cleared
// This else block is now theoretically unreachable given the change above, but kept for safety
fetchAllData()
}
}, [levelFilter, serviceFilter])
const fetchAllData = async () => {
try {
let cancelled = false
const loadData = async () => {
setLoading(true)
setError(null)
const [logsRes, backupsRes, eventsRes, notificationsRes] = await Promise.all([
fetchSystemLogs(),
fetchApi("/api/backups"),
fetchApi("/api/events?limit=50"),
fetchApi("/api/notifications"),
])
setLogs(logsRes)
setBackups(backupsRes.backups || [])
setEvents(eventsRes.events || [])
setNotifications(notificationsRes.notifications || [])
} catch (err) {
console.error("[v0] Error fetching system logs data:", err)
setError("Failed to connect to server")
} finally {
setLoading(false)
try {
const [logsRes, backupsRes, eventsRes, notificationsRes] = await Promise.all([
fetchSystemLogs(dateFilter, customDays),
fetchApi("/api/backups"),
fetchApi("/api/events?limit=50"),
fetchApi("/api/notifications"),
])
if (cancelled) return
setLogs(logsRes)
setBackups(backupsRes.backups || [])
setEvents(eventsRes.events || [])
setNotifications(notificationsRes.notifications || [])
} catch (err) {
if (cancelled) return
setError("Failed to connect to server")
} finally {
if (!cancelled) setLoading(false)
}
}
loadData()
return () => { cancelled = true }
}, [dateFilter, customDays, refreshCounter])
// Reset pagination when filters change
useEffect(() => {
setDisplayedLogsCount(100)
}, [searchTerm, levelFilter, serviceFilter, dateFilter, customDays])
const refreshData = () => {
setRefreshCounter((prev) => prev + 1)
}
const fetchSystemLogs = async (): Promise<SystemLog[]> => {
const fetchSystemLogs = async (filterDays: string, filterCustom: string): Promise<SystemLog[]> => {
try {
let apiUrl = "/api/logs"
const params = new URLSearchParams()
const daysAgo = filterDays === "custom" ? Number.parseInt(filterCustom) : Number.parseInt(filterDays)
const clampedDays = Math.max(1, Math.min(daysAgo || 1, 90))
const apiUrl = `/api/logs?since_days=${clampedDays}`
// CHANGE: Always add since_days parameter (no more "now" option)
const daysAgo = dateFilter === "custom" ? Number.parseInt(customDays) : Number.parseInt(dateFilter)
params.append("since_days", daysAgo.toString())
console.log("[v0] Fetching logs since_days:", daysAgo)
if (levelFilter !== "all") {
const priorityMap: Record<string, string> = {
error: "3", // 0-3: emerg, alert, crit, err
warning: "4", // 4: warning
info: "6", // 5-7: notice, info, debug
}
const priority = priorityMap[levelFilter]
if (priority) {
params.append("priority", priority)
console.log("[v0] Fetching logs with priority:", priority, "for level:", levelFilter)
}
}
if (serviceFilter !== "all") {
params.append("service", serviceFilter)
console.log("[v0] Fetching logs for service:", serviceFilter)
}
params.append("limit", "5000")
if (params.toString()) {
apiUrl += `?${params.toString()}`
}
console.log("[v0] Making fetch request to:", apiUrl)
const data = await fetchApi(apiUrl)
console.log("[v0] Received logs data, count:", data.logs?.length || 0)
const logsArray = Array.isArray(data) ? data : data.logs || []
console.log("[v0] Returning logs array with length:", logsArray.length)
return logsArray
} catch (error) {
console.error("[v0] Failed to fetch system logs:", error)
if (error instanceof Error && error.name === "TimeoutError") {
setError("Request timed out. Try selecting a more specific filter.")
} else {
setError("Failed to load logs. Please try again.")
}
} catch {
setError("Failed to load logs. Please try again.")
return []
}
}
@@ -271,7 +175,6 @@ export function SystemLogs() {
try {
// Generate filename based on active filters
const filters = []
// CHANGE: Always include days in filename (no more "now" option)
const days = dateFilter === "custom" ? customDays : dateFilter
filters.push(`${days}days`)
@@ -294,7 +197,7 @@ export function SystemLogs() {
`Total Entries: ${filteredCombinedLogs.length.toLocaleString()}`,
``,
`Filters Applied:`,
`- Date Range: ${dateFilter === "now" ? "Current logs" : dateFilter === "custom" ? `${customDays} days ago` : `${dateFilter} days ago`}`,
`- Date Range: ${dateFilter === "custom" ? `${customDays} days ago` : `${dateFilter} day(s) ago`}`,
`- Level: ${levelFilter === "all" ? "All Levels" : levelFilter}`,
`- Service: ${serviceFilter === "all" ? "All Services" : serviceFilter}`,
`- Search: ${searchTerm || "None"}`,
@@ -368,8 +271,7 @@ export function SystemLogs() {
window.URL.revokeObjectURL(url)
document.body.removeChild(a)
return
} catch (error) {
console.error("[v0] Failed to fetch task log from Proxmox:", error)
} catch {
// Fall through to download notification message
}
}
@@ -397,8 +299,8 @@ export function SystemLogs() {
a.click()
window.URL.revokeObjectURL(url)
document.body.removeChild(a)
} catch (err) {
console.error("[v0] Error downloading notification:", err)
} catch {
// Download failed silently
}
}
@@ -407,70 +309,11 @@ export function SystemLogs() {
return String(value).toLowerCase()
}
const memoizedLogs = useMemo(() => logs, [logs])
const memoizedEvents = useMemo(() => events, [events])
const memoizedBackups = useMemo(() => backups, [backups])
const memoizedNotifications = useMemo(() => notifications, [notifications])
const logsOnly: CombinedLogEntry[] = useMemo(
() =>
memoizedLogs
.map((log) => ({ ...log, isEvent: false, sortTimestamp: new Date(log.timestamp).getTime() }))
.sort((a, b) => b.sortTimestamp - a.sortTimestamp),
[memoizedLogs],
)
const eventsOnly: CombinedLogEntry[] = useMemo(
() =>
memoizedEvents
.map((event) => ({
timestamp: event.starttime,
level: event.level,
service: event.type,
message: `${event.type}${event.vmid ? ` (VM/CT ${event.vmid})` : ""} - ${event.status}`,
source: `Node: ${event.node} • User: ${event.user}`,
isEvent: true,
eventData: event,
sortTimestamp: new Date(event.starttime).getTime(),
}))
.sort((a, b) => b.sortTimestamp - a.sortTimestamp),
[memoizedEvents],
)
const filteredLogsOnly = logsOnly.filter((log) => {
const message = log.message || ""
const service = log.service || ""
const searchTermLower = safeToLowerCase(searchTerm)
const matchesSearch =
safeToLowerCase(message).includes(searchTermLower) || safeToLowerCase(service).includes(searchTermLower)
const matchesLevel = levelFilter === "all" || log.level === levelFilter
const matchesService = serviceFilter === "all" || log.service === serviceFilter
return matchesSearch && matchesLevel && matchesService
})
const filteredEventsOnly = eventsOnly.filter((event) => {
const message = event.message || ""
const service = event.service || ""
const searchTermLower = safeToLowerCase(searchTerm)
const matchesSearch =
safeToLowerCase(message).includes(searchTermLower) || safeToLowerCase(service).includes(searchTermLower)
const matchesLevel = levelFilter === "all" || event.level === levelFilter
const matchesService = serviceFilter === "all" || event.service === serviceFilter
return matchesSearch && matchesLevel && matchesService
})
const displayedLogsOnly = filteredLogsOnly.slice(0, displayedLogsCount)
const displayedEventsOnly = filteredEventsOnly.slice(0, displayedLogsCount)
const combinedLogs: CombinedLogEntry[] = useMemo(
() =>
[
...memoizedLogs.map((log) => ({ ...log, isEvent: false, sortTimestamp: new Date(log.timestamp).getTime() })),
...memoizedEvents.map((event) => ({
...logs.map((log) => ({ ...log, isEvent: false, sortTimestamp: new Date(log.timestamp).getTime() })),
...events.map((event) => ({
timestamp: event.starttime,
level: event.level,
service: event.type,
@@ -481,18 +324,20 @@ export function SystemLogs() {
sortTimestamp: new Date(event.starttime).getTime(),
})),
].sort((a, b) => b.sortTimestamp - a.sortTimestamp),
[memoizedLogs, memoizedEvents],
[logs, events],
)
const filteredCombinedLogs = useMemo(
() =>
combinedLogs.filter((log) => {
const message = log.message || ""
const service = log.service || ""
const searchTermLower = safeToLowerCase(searchTerm)
const matchesSearch =
safeToLowerCase(message).includes(searchTermLower) || safeToLowerCase(service).includes(searchTermLower)
const matchesSearch = !searchTermLower ||
safeToLowerCase(log.message).includes(searchTermLower) ||
safeToLowerCase(log.service).includes(searchTermLower) ||
safeToLowerCase(log.pid).includes(searchTermLower) ||
safeToLowerCase(log.hostname).includes(searchTermLower) ||
safeToLowerCase(log.unit).includes(searchTermLower)
const matchesLevel = levelFilter === "all" || log.level === levelFilter
const matchesService = serviceFilter === "all" || log.service === serviceFilter
@@ -501,7 +346,6 @@ export function SystemLogs() {
[combinedLogs, searchTerm, levelFilter, serviceFilter],
)
// CHANGE: Re-assigning displayedLogs to use the filteredCombinedLogs
const displayedLogs = filteredCombinedLogs.slice(0, displayedLogsCount)
const hasMoreLogs = displayedLogsCount < filteredCombinedLogs.length
@@ -577,7 +421,6 @@ export function SystemLogs() {
}
}
// ADDED: New function for notification source colors
const getNotificationSourceColor = (source: string) => {
if (!source) return "bg-gray-500/10 text-gray-500 border-gray-500/20"
@@ -600,7 +443,10 @@ export function SystemLogs() {
info: logs.filter((log) => ["info", "notice", "debug"].includes(log.level)).length,
}
const uniqueServices = useMemo(() => [...new Set(memoizedLogs.map((log) => log.service))], [memoizedLogs])
const uniqueServices = useMemo(
() => [...new Set(logs.map((log) => log.service).filter(Boolean))].sort((a, b) => a.localeCompare(b)),
[logs],
)
const getBackupType = (volid: string): "vm" | "lxc" => {
if (volid.includes("/vm/") || volid.includes("vzdump-qemu")) {
@@ -695,20 +541,27 @@ export function SystemLogs() {
if (loading && logs.length === 0 && events.length === 0) {
return (
<div className="flex items-center justify-center h-64">
<RefreshCw className="h-8 w-8 animate-spin text-muted-foreground" />
<div className="flex flex-col items-center justify-center min-h-[400px] gap-4">
<div className="relative">
<div className="h-12 w-12 rounded-full border-2 border-muted"></div>
<div className="absolute inset-0 h-12 w-12 rounded-full border-2 border-transparent border-t-primary animate-spin"></div>
</div>
<div className="text-sm font-medium text-foreground">Loading logs...</div>
<p className="text-xs text-muted-foreground">Fetching system logs and events</p>
</div>
)
}
return (
<div className="space-y-6">
<div className="space-y-6 w-full max-w-full overflow-hidden">
{loading && (logs.length > 0 || events.length > 0) && (
<div className="fixed inset-0 bg-background/80 backdrop-blur-sm z-50 flex items-center justify-center">
<div className="flex flex-col items-center gap-4 p-8 rounded-lg bg-card border border-border shadow-lg">
<RefreshCw className="h-12 w-12 animate-spin text-primary" />
<div className="text-lg font-medium text-foreground">Loading logs selected...</div>
<div className="text-sm text-muted-foreground">Please wait while we fetch the logs</div>
<div className="fixed inset-0 bg-background/60 backdrop-blur-sm z-50 flex items-center justify-center">
<div className="flex flex-col items-center gap-3 p-6 rounded-xl bg-card border border-border shadow-xl">
<div className="relative">
<div className="h-10 w-10 rounded-full border-2 border-muted"></div>
<div className="absolute inset-0 h-10 w-10 rounded-full border-2 border-transparent border-t-primary animate-spin"></div>
</div>
<div className="text-sm font-medium text-foreground">Loading logs...</div>
</div>
</div>
)}
@@ -763,21 +616,21 @@ export function SystemLogs() {
</div>
{/* Main Content with Tabs */}
<Card className="bg-card border-border">
<Card className="bg-card border-border w-full max-w-full overflow-hidden">
<CardHeader>
<div className="flex items-center justify-between">
<CardTitle className="text-foreground flex items-center">
<Activity className="h-5 w-5 mr-2" />
System Logs & Events
</CardTitle>
<Button variant="outline" size="sm" onClick={fetchAllData} disabled={loading}>
<Button variant="outline" size="sm" onClick={refreshData} disabled={loading}>
<RefreshCw className={`h-4 w-4 mr-2 ${loading ? "animate-spin" : ""}`} />
Refresh
</Button>
</div>
</CardHeader>
<CardContent className="max-w-full overflow-hidden">
<Tabs value={activeTab} onValueChange={setActiveTab}>
<Tabs value={activeTab} onValueChange={setActiveTab} className="w-full max-w-full">
<TabsList className="hidden md:grid w-full grid-cols-3">
<TabsTrigger value="logs" className="data-[state=active]:bg-blue-500 data-[state=active]:text-white">
<Terminal className="h-4 w-4 mr-2" />
@@ -875,7 +728,6 @@ export function SystemLogs() {
<Search className="absolute left-3 top-1/2 transform -translate-y-1/2 h-4 w-4 text-muted-foreground" />
<Input
placeholder="Search logs & events..."
// CHANGE: Renamed searchTerm to searchQuery
value={searchTerm}
onChange={(e) => setSearchTerm(e.target.value)}
className="pl-10 bg-background border-border"
@@ -928,8 +780,8 @@ export function SystemLogs() {
<SelectItem key="service-all" value="all">
All Services
</SelectItem>
{uniqueServices.slice(0, 20).map((service, idx) => (
<SelectItem key={`service-${service}-${idx}`} value={service}>
{uniqueServices.map((service) => (
<SelectItem key={`service-${service}`} value={service}>
{service}
</SelectItem>
))}
@@ -942,8 +794,8 @@ export function SystemLogs() {
</Button>
</div>
<ScrollArea className="h-[600px] w-full rounded-md border border-border overflow-x-hidden">
<div className="space-y-2 p-4 w-full box-border">
<ScrollArea className="h-[600px] w-full rounded-md border border-border overflow-hidden [&>div]:!max-w-full [&>div>div]:!max-w-full">
<div className="space-y-2 p-4 w-full min-w-0">
{displayedLogs.map((log, index) => {
// Generate a more stable unique key
const timestampMs = new Date(log.timestamp).getTime()
@@ -954,7 +806,7 @@ export function SystemLogs() {
return (
<div
key={uniqueKey}
className="flex flex-col md:flex-row md:items-start space-y-2 md:space-y-0 md:space-x-4 p-3 rounded-lg border border-white/10 sm:border-border bg-white/5 sm:bg-card sm:hover:bg-white/5 transition-colors cursor-pointer overflow-hidden box-border"
className="flex flex-col md:flex-row md:items-start space-y-2 md:space-y-0 md:space-x-4 p-3 rounded-lg border border-white/10 sm:border-border bg-white/5 sm:bg-card sm:hover:bg-white/5 transition-colors cursor-pointer overflow-hidden w-full max-w-full min-w-0"
onClick={() => {
if (log.eventData) {
setSelectedEvent(log.eventData)
@@ -978,18 +830,19 @@ export function SystemLogs() {
)}
</div>
<div className="flex-1 min-w-0 overflow-hidden box-border">
<div className="flex-1 min-w-0 overflow-hidden">
<div className="flex flex-col sm:flex-row sm:items-center sm:justify-between mb-1 gap-1">
<div className="text-sm font-medium text-foreground truncate min-w-0">{log.service}</div>
<div className="text-xs text-muted-foreground font-mono truncate sm:ml-2 sm:flex-shrink-0">
{log.timestamp}
</div>
</div>
<div className="text-sm text-foreground mb-1 line-clamp-2 break-all overflow-hidden">
<div className="text-sm text-foreground mb-1 line-clamp-2 break-words overflow-hidden">
{log.message}
</div>
<div className="text-xs text-muted-foreground truncate break-all overflow-hidden">
<div className="text-xs text-muted-foreground truncate overflow-hidden">
{log.source}
{log.unit && log.unit !== log.service && ` • Unit: ${log.unit}`}
{log.pid && ` • PID: ${log.pid}`}
{log.hostname && ` • Host: ${log.hostname}`}
</div>
@@ -1006,10 +859,10 @@ export function SystemLogs() {
)}
{hasMoreLogs && (
<div className="flex justify-center pt-4">
<div className="flex justify-center pt-4 w-full">
<Button
variant="outline"
onClick={() => setDisplayedLogsCount((prev) => prev + 500)}
onClick={() => setDisplayedLogsCount((prev) => prev + 200)}
className="border-border"
>
<RefreshCw className="h-4 w-4 mr-2" />
@@ -1057,7 +910,7 @@ export function SystemLogs() {
<ScrollArea className="h-[500px] w-full rounded-md border border-border">
<div className="space-y-2 p-4">
{memoizedBackups.map((backup, index) => {
{backups.map((backup, index) => {
const uniqueKey = `backup-${backup.volid.replace(/[/:]/g, "-")}-${backup.timestamp || index}`
return (
@@ -1114,7 +967,7 @@ export function SystemLogs() {
<TabsContent value="notifications" className="space-y-4">
<ScrollArea className="h-[600px] w-full rounded-md border border-border">
<div className="space-y-2 p-4">
{memoizedNotifications.map((notification, index) => {
{notifications.map((notification, index) => {
const timestampMs = new Date(notification.timestamp).getTime()
const uniqueKey = `notification-${timestampMs}-${notification.service?.substring(0, 10) || "unknown"}-${notification.source?.substring(0, 10) || "unknown"}-${index}`
@@ -1202,6 +1055,12 @@ export function SystemLogs() {
<div className="text-sm font-medium text-muted-foreground mb-1">Source</div>
<div className="text-sm text-foreground break-all overflow-hidden">{selectedLog.source}</div>
</div>
{selectedLog.unit && (
<div>
<div className="text-sm font-medium text-muted-foreground mb-1">Systemd Unit</div>
<div className="text-sm text-foreground font-mono break-all overflow-hidden">{selectedLog.unit}</div>
</div>
)}
{selectedLog.pid && (
<div>
<div className="text-sm font-medium text-muted-foreground mb-1">Process ID</div>
+78 -44
View File
@@ -7,9 +7,17 @@ import { Badge } from "./ui/badge"
import { Cpu, MemoryStick, Thermometer, Server, Zap, AlertCircle, HardDrive, Network } from "lucide-react"
import { NodeMetricsCharts } from "./node-metrics-charts"
import { NetworkTrafficChart } from "./network-traffic-chart"
import { TemperatureDetailModal } from "./temperature-detail-modal"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
import { fetchApi } from "../lib/api-config"
import { formatNetworkTraffic, getNetworkUnit } from "../lib/format-network"
import { formatStorage } from "../lib/utils"
import { Area, AreaChart, ResponsiveContainer } from "recharts"
interface TempDataPoint {
timestamp: number
value: number
}
interface SystemData {
cpu_usage: number
@@ -17,6 +25,7 @@ interface SystemData {
memory_total: number
memory_used: number
temperature: number
temperature_sparkline?: TempDataPoint[]
uptime: string
load_average: number[]
hostname: string
@@ -102,9 +111,9 @@ const fetchSystemData = async (retries = 3, delayMs = 500): Promise<SystemData |
try {
const data = await fetchApi<SystemData>("/api/system")
return data
} catch (error) {
} catch {
if (attempt === retries - 1) {
console.error("[v0] Failed to fetch system data after retries:", error)
// Silent fail - API not available (expected in preview environment)
return null
}
// Wait before retry
@@ -118,8 +127,8 @@ const fetchVMData = async (): Promise<VMData[]> => {
try {
const data = await fetchApi<any>("/api/vms")
return Array.isArray(data) ? data : data.vms || []
} catch (error) {
console.error("[v0] Failed to fetch VM data:", error)
} catch {
// Silent fail - API not available
return []
}
}
@@ -128,8 +137,7 @@ const fetchStorageData = async (): Promise<StorageData | null> => {
try {
const data = await fetchApi<StorageData>("/api/storage/summary")
return data
} catch (error) {
console.log("[v0] Storage API not available (this is normal if not configured)")
} catch {
return null
}
}
@@ -138,18 +146,16 @@ const fetchNetworkData = async (): Promise<NetworkData | null> => {
try {
const data = await fetchApi<NetworkData>("/api/network/summary")
return data
} catch (error) {
console.log("[v0] Network API not available (this is normal if not configured)")
} catch {
return null
}
}
const fetchProxmoxStorageData = async (): Promise<ProxmoxStorageData | null> => {
const fetchProxmoxStorageData = async (): Promise<ProxmoxStorage[] | null> => {
try {
const data = await fetchApi<ProxmoxStorageData>("/api/proxmox-storage")
const data = await fetchApi<ProxmoxStorage[]>("/api/proxmox-storage")
return data
} catch (error) {
console.log("[v0] Proxmox storage API not available")
} catch {
return null
}
}
@@ -177,6 +183,7 @@ export function SystemOverview() {
const [networkTimeframe, setNetworkTimeframe] = useState("day")
const [networkTotals, setNetworkTotals] = useState<{ received: number; sent: number }>({ received: 0, sent: 0 })
const [networkUnit, setNetworkUnit] = useState<"Bytes" | "Bits">("Bytes") // Added networkUnit state
const [tempModalOpen, setTempModalOpen] = useState(false)
useEffect(() => {
const fetchAllData = async () => {
@@ -215,7 +222,7 @@ export function SystemOverview() {
const systemInterval = setInterval(async () => {
const data = await fetchSystemData()
if (data) setSystemData(data)
}, 9000)
}, 5000)
const vmInterval = setInterval(async () => {
const data = await fetchVMData()
@@ -252,19 +259,13 @@ export function SystemOverview() {
if (!hasAttemptedLoad || loadingStates.system) {
return (
<div className="space-y-6">
<div className="grid grid-cols-1 md:grid-cols-2 xl:grid-cols-4 gap-6">
{[...Array(4)].map((_, i) => (
<Card key={i} className="bg-card border-border animate-pulse">
<CardContent className="p-6">
<div className="h-4 bg-muted rounded w-1/2 mb-4"></div>
<div className="h-8 bg-muted rounded w-3/4 mb-2"></div>
<div className="h-2 bg-muted rounded w-full mb-2"></div>
<div className="h-3 bg-muted rounded w-2/3"></div>
</CardContent>
</Card>
))}
<div className="flex flex-col items-center justify-center min-h-[400px] gap-4">
<div className="relative">
<div className="h-12 w-12 rounded-full border-2 border-muted"></div>
<div className="absolute inset-0 h-12 w-12 rounded-full border-2 border-transparent border-t-primary animate-spin"></div>
</div>
<div className="text-sm font-medium text-foreground">Loading system overview...</div>
<p className="text-xs text-muted-foreground">Fetching system status and metrics</p>
</div>
)
}
@@ -457,27 +458,60 @@ export function SystemOverview() {
</CardContent>
</Card>
<Card className="bg-card border-border">
<Card
className={`bg-card border-border ${systemData.temperature > 0 ? "cursor-pointer hover:bg-white/5 transition-colors" : ""}`}
onClick={() => systemData.temperature > 0 && setTempModalOpen(true)}
>
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
<CardTitle className="text-sm font-medium text-muted-foreground">Temperature</CardTitle>
<Thermometer className="h-4 w-4 text-muted-foreground" />
</CardHeader>
<CardContent>
<div className="text-xl lg:text-2xl font-bold text-foreground">
{systemData.temperature === 0 ? "N/A" : `${systemData.temperature}°C`}
</div>
<div className="flex items-center mt-2">
<div className="flex items-center justify-between">
<span className="text-xl lg:text-2xl font-bold text-foreground">
{systemData.temperature === 0 ? "N/A" : `${Math.round(systemData.temperature * 10) / 10}°C`}
</span>
<Badge variant="outline" className={tempStatus.color}>
{tempStatus.status}
</Badge>
</div>
<p className="text-xs text-muted-foreground mt-2">
{systemData.temperature === 0 ? "No sensor available" : "Live temperature reading"}
</p>
{systemData.temperature > 0 && systemData.temperature_sparkline && systemData.temperature_sparkline.length > 1 ? (
<div className="mt-2 h-10">
<ResponsiveContainer width="100%" height="100%">
<AreaChart data={systemData.temperature_sparkline} margin={{ top: 0, right: 0, left: 0, bottom: 0 }}>
<defs>
<linearGradient id="tempSparkGradient" x1="0" y1="0" x2="0" y2="1">
<stop offset="0%" stopColor={systemData.temperature >= 75 ? "#ef4444" : systemData.temperature >= 60 ? "#f59e0b" : "#22c55e"} stopOpacity={0.3} />
<stop offset="100%" stopColor={systemData.temperature >= 75 ? "#ef4444" : systemData.temperature >= 60 ? "#f59e0b" : "#22c55e"} stopOpacity={0} />
</linearGradient>
</defs>
<Area
type="monotone"
dataKey="value"
stroke={systemData.temperature >= 75 ? "#ef4444" : systemData.temperature >= 60 ? "#f59e0b" : "#22c55e"}
strokeWidth={1.5}
fill="url(#tempSparkGradient)"
dot={false}
isAnimationActive={false}
/>
</AreaChart>
</ResponsiveContainer>
</div>
) : (
<p className="text-xs text-muted-foreground mt-2">
{systemData.temperature === 0 ? "No sensor available" : "Collecting data..."}
</p>
)}
</CardContent>
</Card>
</div>
<TemperatureDetailModal
open={tempModalOpen}
onOpenChange={setTempModalOpen}
liveTemperature={systemData.temperature}
/>
<NodeMetricsCharts />
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
@@ -508,7 +542,7 @@ export function SystemOverview() {
<div className="flex justify-between items-center">
<span className="text-sm font-medium text-foreground">Total Node Capacity:</span>
<span className="text-lg font-bold text-foreground">
{formatNetworkTraffic(totalCapacity, "Bytes")}
{formatStorage(totalCapacity)}
</span>
</div>
<Progress
@@ -520,13 +554,13 @@ export function SystemOverview() {
<span className="text-xs text-muted-foreground">
Used:{" "}
<span className="font-semibold text-foreground">
{formatNetworkTraffic(totalUsed, "Bytes")}
{formatStorage(totalUsed)}
</span>
</span>
<span className="text-xs text-muted-foreground">
Free:{" "}
<span className="font-semibold text-green-500">
{formatNetworkTraffic(totalAvailable, "Bytes")}
{formatStorage(totalAvailable)}
</span>
</span>
</div>
@@ -555,20 +589,20 @@ export function SystemOverview() {
<div className="flex justify-between items-center">
<span className="text-xs text-muted-foreground">Used:</span>
<span className="text-sm font-semibold text-foreground">
{formatNetworkTraffic(vmLxcStorageUsed, "Bytes")}
{formatStorage(vmLxcStorageUsed)}
</span>
</div>
<div className="flex justify-between items-center">
<span className="text-xs text-muted-foreground">Available:</span>
<span className="text-sm font-semibold text-green-500">
{formatNetworkTraffic(vmLxcStorageAvailable, "Bytes")}
{formatStorage(vmLxcStorageAvailable)}
</span>
</div>
<Progress value={vmLxcStoragePercent} className="mt-2 [&>div]:bg-blue-500" />
<div className="flex justify-between items-center mt-1">
<span className="text-xs text-muted-foreground">
{formatNetworkTraffic(vmLxcStorageUsed, "Bytes")} /{" "}
{formatNetworkTraffic(vmLxcStorageTotal, "Bytes")}
{formatStorage(vmLxcStorageUsed)} /{" "}
{formatStorage(vmLxcStorageTotal)}
</span>
<span className="text-xs text-muted-foreground">{vmLxcStoragePercent.toFixed(1)}%</span>
</div>
@@ -591,20 +625,20 @@ export function SystemOverview() {
<div className="flex justify-between items-center">
<span className="text-xs text-muted-foreground">Used:</span>
<span className="text-sm font-semibold text-foreground">
{formatNetworkTraffic(localStorage.used, "Bytes")}
{formatStorage(localStorage.used)}
</span>
</div>
<div className="flex justify-between items-center">
<span className="text-xs text-muted-foreground">Available:</span>
<span className="text-sm font-semibold text-green-500">
{formatNetworkTraffic(localStorage.available, "Bytes")}
{formatStorage(localStorage.available)}
</span>
</div>
<Progress value={localStorage.percent} className="mt-2 [&>div]:bg-purple-500" />
<div className="flex justify-between items-center mt-1">
<span className="text-xs text-muted-foreground">
{formatNetworkTraffic(localStorage.used, "Bytes")} /{" "}
{formatNetworkTraffic(localStorage.total, "Bytes")}
{formatStorage(localStorage.used)} /{" "}
{formatStorage(localStorage.total)}
</span>
<span className="text-xs text-muted-foreground">{localStorage.percent.toFixed(1)}%</span>
</div>
@@ -0,0 +1,242 @@
"use client"
import { useState, useEffect } from "react"
import { Dialog, DialogContent, DialogHeader, DialogTitle } from "./ui/dialog"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
import { Badge } from "./ui/badge"
import { Thermometer, TrendingDown, TrendingUp, Minus } from "lucide-react"
import { AreaChart, Area, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer } from "recharts"
import { useIsMobile } from "../hooks/use-mobile"
import { fetchApi } from "@/lib/api-config"
// Timeframe choices for the modal's range <Select>; `value` is sent verbatim
// as the `timeframe` query parameter to /api/temperature/history.
const TIMEFRAME_OPTIONS = [
  { value: "hour", label: "1 Hour" },
  { value: "day", label: "24 Hours" },
  { value: "week", label: "7 Days" },
  { value: "month", label: "30 Days" },
]

// One sampled temperature reading. `timestamp` is epoch seconds (the chart
// multiplies by 1000 before constructing a Date). `min`/`max` look like
// optional per-point bounds for aggregated samples — TODO confirm against
// the backend; this component does not read them.
interface TempHistoryPoint {
  timestamp: number
  value: number
  min?: number
  max?: number
}

// Summary statistics for the selected timeframe, returned alongside the
// history points by /api/temperature/history.
interface TempStats {
  min: number
  max: number
  avg: number
  current: number
}

// Props for TemperatureDetailModal. When `liveTemperature` > 0 it replaces
// the last recorded DB value in the "Current" stat card.
interface TemperatureDetailModalProps {
  open: boolean
  onOpenChange: (open: boolean) => void
  liveTemperature?: number
}
// Tooltip renderer for the temperature chart: shows the hovered time label
// plus one color-coded "name: value°C" row per series entry. Returns null
// while the tooltip is inactive or has no payload.
const CustomTooltip = ({ active, payload, label }: any) => {
  if (!active || !payload || !payload.length) {
    return null
  }
  return (
    <div className="bg-gray-900/95 backdrop-blur-sm border border-gray-700 rounded-lg p-3 shadow-xl">
      <p className="text-sm font-semibold text-white mb-2">{label}</p>
      <div className="space-y-1.5">
        {payload.map((entry: any, index: number) => (
          <div key={index} className="flex items-center gap-2">
            <div className="w-2.5 h-2.5 rounded-full flex-shrink-0" style={{ backgroundColor: entry.color }} />
            <span className="text-xs text-gray-300 min-w-[60px]">{entry.name}:</span>
            <span className="text-sm font-semibold text-white">{entry.value}°C</span>
          </div>
        ))}
      </div>
    </div>
  )
}
// Maps a temperature reading to the chart accent color:
// red at/above 75°C, amber at/above 60°C, green below that.
const getStatusColor = (temp: number) =>
  temp >= 75 ? "#ef4444" : temp >= 60 ? "#f59e0b" : "#22c55e"
// Classifies a temperature into a display status plus badge color classes.
// A reading of exactly 0 means "no sensor" (N/A); otherwise <60 is Normal,
// 60–74.99 is Warm, and 75+ is Hot.
const getStatusInfo = (temp: number) => {
  if (temp === 0) {
    return { status: "N/A", color: "bg-gray-500/10 text-gray-500 border-gray-500/20" }
  }
  if (temp >= 75) {
    return { status: "Hot", color: "bg-red-500/10 text-red-500 border-red-500/20" }
  }
  if (temp >= 60) {
    return { status: "Warm", color: "bg-yellow-500/10 text-yellow-500 border-yellow-500/20" }
  }
  return { status: "Normal", color: "bg-green-500/10 text-green-500 border-green-500/20" }
}
/**
 * Modal dialog charting historical CPU temperature over a selectable
 * timeframe (1h / 24h / 7d / 30d), with Current / Min / Avg / Max stat cards.
 *
 * Data is fetched from GET /api/temperature/history?timeframe=<value> every
 * time the dialog opens or the timeframe selection changes.
 *
 * @param open            Whether the dialog is visible.
 * @param onOpenChange    Open/close callback forwarded to <Dialog>.
 * @param liveTemperature Real-time reading from the overview card; when > 0
 *                        it replaces the last DB value in the "Current" card.
 */
export function TemperatureDetailModal({ open, onOpenChange, liveTemperature }: TemperatureDetailModalProps) {
  const [timeframe, setTimeframe] = useState("hour")
  const [data, setData] = useState<TempHistoryPoint[]>([])
  const [stats, setStats] = useState<TempStats>({ min: 0, max: 0, avg: 0, current: 0 })
  const [loading, setLoading] = useState(true)
  const isMobile = useIsMobile()

  // Refetch on open and whenever the timeframe changes.
  // NOTE(review): fetchHistory is recreated each render and intentionally
  // omitted from the dependency array — confirm this is deliberate.
  useEffect(() => {
    if (open) {
      fetchHistory()
    }
  }, [open, timeframe])

  // Loads history + stats for the current timeframe. On failure only logs
  // and keeps the previously loaded dataset on screen.
  const fetchHistory = async () => {
    setLoading(true)
    try {
      const result = await fetchApi<{ data: TempHistoryPoint[]; stats: TempStats }>(
        `/api/temperature/history?timeframe=${timeframe}`
      )
      if (result && result.data) {
        setData(result.data)
        setStats(result.stats)
      }
    } catch (err) {
      console.error("[v0] Failed to fetch temperature history:", err)
    } finally {
      setLoading(false)
    }
  }

  // X-axis label: time-of-day for hour/day ranges, date + time for longer
  // ones. `timestamp` is epoch seconds, hence the * 1000.
  const formatTime = (timestamp: number) => {
    const date = new Date(timestamp * 1000)
    if (timeframe === "hour") {
      return date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })
    } else if (timeframe === "day") {
      return date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })
    } else {
      return date.toLocaleDateString([], { month: "short", day: "numeric", hour: "2-digit", minute: "2-digit" })
    }
  }

  // History points augmented with a preformatted axis label.
  const chartData = data.map((d) => ({
    ...d,
    time: formatTime(d.timestamp),
  }))

  // Use live temperature from the overview card (real-time) instead of last DB record
  const currentTemp = liveTemperature && liveTemperature > 0 ? Math.round(liveTemperature * 10) / 10 : stats.current
  const currentStatus = getStatusInfo(currentTemp)
  const chartColor = getStatusColor(currentTemp)

  // Calculate Y axis domain based on plotted data values only.
  // Stats cards already show the real historical min/max separately.
  // Using only graphed values keeps the chart readable and avoids
  // large empty gaps caused by momentary spikes that get averaged out.
  const values = data.map((d) => d.value)
  const yMin = values.length > 0 ? Math.max(0, Math.floor(Math.min(...values) - 3)) : 0
  const yMax = values.length > 0 ? Math.ceil(Math.max(...values) + 3) : 100

  return (
    <Dialog open={open} onOpenChange={onOpenChange}>
      <DialogContent className="max-w-3xl bg-card border-border px-3 sm:px-6">
        <DialogHeader>
          <div className="flex items-center justify-between pr-6">
            <DialogTitle className="text-foreground flex items-center gap-2">
              <Thermometer className="h-5 w-5" />
              CPU Temperature
            </DialogTitle>
            <Select value={timeframe} onValueChange={setTimeframe}>
              <SelectTrigger className="w-[130px] bg-card border-border">
                <SelectValue />
              </SelectTrigger>
              <SelectContent>
                {TIMEFRAME_OPTIONS.map((opt) => (
                  <SelectItem key={opt.value} value={opt.value}>
                    {opt.label}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
          </div>
        </DialogHeader>
        {/* Stats bar */}
        <div className="grid grid-cols-2 sm:grid-cols-4 gap-2 sm:gap-3">
          <div className={`rounded-lg p-3 text-center ${currentStatus.color}`}>
            <div className="text-xs opacity-80 mb-1">Current</div>
            <div className="text-lg font-bold">{currentTemp}°C</div>
          </div>
          <div className="bg-muted/50 rounded-lg p-3 text-center">
            <div className="text-xs text-muted-foreground mb-1 flex items-center justify-center gap-1">
              <TrendingDown className="h-3 w-3" /> Min
            </div>
            <div className="text-lg font-bold text-green-500">{stats.min}°C</div>
          </div>
          <div className="bg-muted/50 rounded-lg p-3 text-center">
            <div className="text-xs text-muted-foreground mb-1 flex items-center justify-center gap-1">
              <Minus className="h-3 w-3" /> Avg
            </div>
            <div className="text-lg font-bold text-foreground">{stats.avg}°C</div>
          </div>
          <div className="bg-muted/50 rounded-lg p-3 text-center">
            <div className="text-xs text-muted-foreground mb-1 flex items-center justify-center gap-1">
              <TrendingUp className="h-3 w-3" /> Max
            </div>
            <div className="text-lg font-bold text-red-500">{stats.max}°C</div>
          </div>
        </div>
        {/* Chart: skeleton while loading, empty-state when no points, area chart otherwise */}
        <div className="h-[300px] lg:h-[350px]">
          {loading ? (
            <div className="h-full flex items-center justify-center">
              <div className="space-y-3 w-full animate-pulse">
                <div className="h-4 bg-muted rounded w-1/4 mx-auto" />
                <div className="h-[250px] bg-muted/50 rounded" />
              </div>
            </div>
          ) : chartData.length === 0 ? (
            <div className="h-full flex items-center justify-center text-muted-foreground">
              <div className="text-center">
                <Thermometer className="h-8 w-8 mx-auto mb-2 opacity-50" />
                <p>No temperature data available for this period</p>
                <p className="text-sm mt-1">Data is collected every 60 seconds</p>
              </div>
            </div>
          ) : (
            <ResponsiveContainer width="100%" height="100%">
              <AreaChart data={chartData} margin={{ top: 10, right: 10, left: 0, bottom: 0 }}>
                <defs>
                  <linearGradient id="tempGradient" x1="0" y1="0" x2="0" y2="1">
                    <stop offset="0%" stopColor={chartColor} stopOpacity={0.3} />
                    <stop offset="100%" stopColor={chartColor} stopOpacity={0.02} />
                  </linearGradient>
                </defs>
                <CartesianGrid strokeDasharray="3 3" stroke="currentColor" className="text-border" />
                <XAxis
                  dataKey="time"
                  stroke="currentColor"
                  className="text-foreground"
                  tick={{ fill: "currentColor", fontSize: isMobile ? 10 : 12 }}
                  interval="preserveStartEnd"
                  minTickGap={isMobile ? 40 : 60}
                />
                <YAxis
                  domain={[yMin, yMax]}
                  stroke="currentColor"
                  className="text-foreground"
                  tick={{ fill: "currentColor", fontSize: isMobile ? 10 : 12 }}
                  tickFormatter={(v) => `${v}°`}
                  width={isMobile ? 40 : 45}
                />
                <Tooltip content={<CustomTooltip />} />
                <Area
                  type="monotone"
                  dataKey="value"
                  name="Temperature"
                  stroke={chartColor}
                  strokeWidth={2}
                  fill="url(#tempGradient)"
                  dot={false}
                  activeDot={{ r: 4, fill: chartColor, stroke: "#fff", strokeWidth: 2 }}
                />
              </AreaChart>
            </ResponsiveContainer>
          )}
        </div>
      </DialogContent>
    </Dialog>
  )
}
+230 -33
View File
@@ -15,7 +15,16 @@ import {
AlignJustify,
Grid2X2,
GripHorizontal,
ChevronDown,
} from "lucide-react"
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
DropdownMenuSeparator,
DropdownMenuLabel,
} from "@/components/ui/dropdown-menu"
import { Button } from "@/components/ui/button"
import { Dialog, DialogContent, DialogHeader, DialogTitle, DialogDescription } from "@/components/ui/dialog"
import { Input } from "@/components/ui/input"
@@ -34,6 +43,7 @@ interface TerminalInstance {
ws: WebSocket | null
isConnected: boolean
fitAddon: any // Added fitAddon to TerminalInstance
pingInterval?: ReturnType<typeof setInterval> | null // Heartbeat interval to keep connection alive
}
function getWebSocketUrl(): string {
@@ -171,6 +181,29 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
}
}, [])
// Handle page visibility change for automatic reconnection when user returns
// This is especially important for mobile/tablet devices (iPad) where switching apps
// puts the browser tab in background and may close WebSocket connections
useEffect(() => {
const handleVisibilityChange = () => {
if (document.visibilityState === 'visible') {
// When page becomes visible again, check all terminal connections
terminals.forEach((terminal) => {
if (terminal.ws && terminal.ws.readyState !== WebSocket.OPEN && terminal.term) {
// Terminal is disconnected, attempt to reconnect
reconnectTerminal(terminal.id)
}
})
}
}
document.addEventListener('visibilitychange', handleVisibilityChange)
return () => {
document.removeEventListener('visibilitychange', handleVisibilityChange)
}
}, [terminals])
const handleResizeStart = (e: React.MouseEvent | React.TouchEvent) => {
// Bloquear solo en pantallas muy pequeñas (móviles)
if (window.innerWidth < 640 && !isTablet) {
@@ -273,6 +306,85 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
return () => clearTimeout(debounce)
}, [searchQuery])
// Re-establishes the WebSocket for a terminal whose connection was lost.
// Called from the visibilitychange handler when the user returns to the tab
// (common on iPad/mobile, where backgrounding can close sockets).
const reconnectTerminal = async (terminalId: string) => {
  const terminal = terminals.find(t => t.id === terminalId)
  if (!terminal || !terminal.term) return
  // Show reconnecting message
  terminal.term.writeln('\r\n\x1b[33m[INFO] Reconnecting...\x1b[0m')
  const wsUrl = websocketUrl || getWebSocketUrl()
  const ws = new WebSocket(wsUrl)
  ws.onopen = () => {
    // Clear any existing ping interval from the previous connection
    if (terminal.pingInterval) {
      clearInterval(terminal.pingInterval)
    }
    // Start heartbeat ping every 25 seconds to keep the connection alive;
    // the interval self-cancels once the socket is no longer OPEN
    const pingInterval = setInterval(() => {
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify({ type: 'ping' }))
      } else {
        clearInterval(pingInterval)
      }
    }, 25000)
    setTerminals((prev) =>
      prev.map((t) => (t.id === terminalId ? { ...t, isConnected: true, ws, pingInterval } : t))
    )
    terminal.term.writeln('\r\n\x1b[32m[INFO] Reconnected successfully\x1b[0m')
    // Sync terminal size with the backend PTY after reconnecting
    if (terminal.fitAddon) {
      try {
        terminal.fitAddon.fit()
        ws.send(JSON.stringify({
          type: 'resize',
          cols: terminal.term.cols,
          rows: terminal.term.rows,
        }))
      } catch (err) {
        console.warn('[Terminal] resize on reconnect failed:', err)
      }
    }
  }
  ws.onmessage = (event) => {
    // Filter out pong responses from heartbeat - don't display in terminal
    if (event.data === '{"type": "pong"}' || event.data === '{"type":"pong"}') {
      return
    }
    terminal.term.write(event.data)
  }
  ws.onerror = () => {
    terminal.term.writeln('\r\n\x1b[31m[ERROR] Reconnection failed\x1b[0m')
  }
  ws.onclose = () => {
    // Mark disconnected and stop the heartbeat for this terminal
    setTerminals((prev) => prev.map((t) => {
      if (t.id === terminalId) {
        if (t.pingInterval) {
          clearInterval(t.pingInterval)
        }
        return { ...t, isConnected: false, pingInterval: null }
      }
      return t
    }))
    terminal.term.writeln('\r\n\x1b[33m[INFO] Connection closed\x1b[0m')
  }
  // NOTE(review): each reconnect registers an additional onData listener
  // without disposing the previous one; stale handlers appear harmless only
  // because each guards on its own (now-closed) socket's readyState —
  // consider keeping the IDisposable returned by onData and disposing it.
  terminal.term.onData((data: string) => {
    if (ws.readyState === WebSocket.OPEN) {
      ws.send(data)
    }
  })
}
const addNewTerminal = () => {
if (terminals.length >= 4) return
@@ -286,6 +398,7 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
ws: null,
isConnected: false,
fitAddon: null, // Added fitAddon initialization
pingInterval: null, // Added pingInterval initialization
},
])
setActiveTerminalId(newId)
@@ -294,6 +407,10 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
const closeTerminal = (id: string) => {
const terminal = terminals.find((t) => t.id === id)
if (terminal) {
// Clear heartbeat interval
if (terminal.pingInterval) {
clearInterval(terminal.pingInterval)
}
if (terminal.ws) {
terminal.ws.close()
}
@@ -314,12 +431,18 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
}
useEffect(() => {
terminals.forEach((terminal) => {
const container = containerRefs.current[terminal.id]
if (!terminal.term && container) {
initializeTerminal(terminal, container)
}
})
// Small delay to ensure DOM refs are available after state update
// This fixes the issue where first terminal doesn't connect on mobile/VPN
const timer = setTimeout(() => {
terminals.forEach((terminal) => {
const container = containerRefs.current[terminal.id]
if (!terminal.term && container) {
initializeTerminal(terminal, container)
}
})
}, 50)
return () => clearTimeout(timer)
}, [terminals, isMobile])
useEffect(() => {
@@ -401,7 +524,22 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
fitAddon.fit()
const wsUrl = websocketUrl || getWebSocketUrl()
// Connection with timeout for VPN/mobile (15 seconds)
const connectionTimeout = 15000
let connectionTimedOut = false
const ws = new WebSocket(wsUrl)
// Set connection timeout
const timeoutId = setTimeout(() => {
if (ws.readyState !== WebSocket.OPEN) {
connectionTimedOut = true
ws.close()
term.writeln('\x1b[31m[ERROR] Connection timeout. Please check your network and try again.\x1b[0m')
term.writeln('\x1b[33m[TIP] If using VPN, ensure the connection is stable.\x1b[0m')
}
}, connectionTimeout)
const syncSizeWithBackend = () => {
try {
@@ -423,25 +561,66 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
}
ws.onopen = () => {
// Clear connection timeout - we're connected!
clearTimeout(timeoutId)
// Start heartbeat ping every 25 seconds to keep connection alive
// This prevents disconnection when switching apps on mobile/tablet (iPad)
const pingInterval = setInterval(() => {
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'ping' }))
} else {
clearInterval(pingInterval)
}
}, 25000)
setTerminals((prev) =>
prev.map((t) => (t.id === terminal.id ? { ...t, isConnected: true, term, ws, fitAddon } : t)),
prev.map((t) => (t.id === terminal.id ? { ...t, isConnected: true, term, ws, fitAddon, pingInterval } : t)),
)
syncSizeWithBackend()
}
ws.onmessage = (event) => {
// Filter out pong responses from heartbeat - don't display in terminal
if (event.data === '{"type": "pong"}' || event.data === '{"type":"pong"}') {
return
}
term.write(event.data)
}
ws.onerror = (error) => {
clearTimeout(timeoutId)
console.error("[v0] TerminalPanel: WebSocket error:", error)
setTerminals((prev) => prev.map((t) => (t.id === terminal.id ? { ...t, isConnected: false } : t)))
term.writeln("\r\n\x1b[31m[ERROR] WebSocket connection error\x1b[0m")
setTerminals((prev) => prev.map((t) => {
if (t.id === terminal.id) {
if (t.pingInterval) {
clearInterval(t.pingInterval)
}
return { ...t, isConnected: false, pingInterval: null }
}
return t
}))
// Only show error if not already shown by timeout
if (!connectionTimedOut) {
term.writeln("\r\n\x1b[31m[ERROR] WebSocket connection error\x1b[0m")
}
}
ws.onclose = () => {
setTerminals((prev) => prev.map((t) => (t.id === terminal.id ? { ...t, isConnected: false } : t)))
term.writeln("\r\n\x1b[33m[INFO] Connection closed\x1b[0m")
clearTimeout(timeoutId)
setTerminals((prev) => prev.map((t) => {
if (t.id === terminal.id) {
if (t.pingInterval) {
clearInterval(t.pingInterval)
}
return { ...t, isConnected: false, pingInterval: null }
}
return t
}))
// Only show close message if not already shown by timeout
if (!connectionTimedOut) {
term.writeln("\r\n\x1b[33m[INFO] Connection closed\x1b[0m")
}
}
term.onData((data) => {
@@ -518,8 +697,10 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
}
}
const handleClose = () => {
const handleClose = () => {
terminals.forEach((terminal) => {
// Clear heartbeat interval
if (terminal.pingInterval) clearInterval(terminal.pingInterval)
if (terminal.ws) terminal.ws.close()
if (terminal.term) terminal.term.dispose()
})
@@ -543,13 +724,13 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
e.preventDefault()
e.stopPropagation()
}
const activeTerminal = terminals.find((t) => t.id === activeTerminalId)
if (activeTerminal?.ws && activeTerminal.ws.readyState === WebSocket.OPEN) {
activeTerminal.ws.send(seq)
}
}
const getLayoutClass = () => {
const count = terminals.length
if (isMobile || count === 1) return "grid grid-cols-1"
@@ -611,7 +792,7 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
variant="outline"
size="sm"
disabled={terminals.length >= 4}
className="h-8 gap-2 bg-green-600 hover:bg-green-700 border-green-500 text-white disabled:opacity-50"
className="h-8 gap-2 bg-green-600/20 hover:bg-green-600/30 border-green-600/50 text-green-400 disabled:opacity-50"
>
<Plus className="h-4 w-4" />
<span className="hidden sm:inline">New</span>
@@ -621,7 +802,7 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
variant="outline"
size="sm"
disabled={!activeTerminal?.isConnected}
className="h-8 gap-2 bg-blue-600 hover:bg-blue-700 border-blue-500 text-white disabled:opacity-50"
className="h-8 gap-2 bg-blue-600/20 hover:bg-blue-600/30 border-blue-600/50 text-blue-400 disabled:opacity-50"
>
<Search className="h-4 w-4" />
<span className="hidden sm:inline">Search</span>
@@ -631,7 +812,7 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
variant="outline"
size="sm"
disabled={!activeTerminal?.isConnected}
className="h-8 gap-2 bg-yellow-600 hover:bg-yellow-700 border-yellow-500 text-white disabled:opacity-50"
className="h-8 gap-2 bg-yellow-600/20 hover:bg-yellow-600/30 border-yellow-600/50 text-yellow-400 disabled:opacity-50"
>
<Trash2 className="h-4 w-4" />
<span className="hidden sm:inline">Clear</span>
@@ -640,7 +821,7 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
onClick={handleClose}
variant="outline"
size="sm"
className="h-8 gap-2 bg-red-600 hover:bg-red-700 border-red-500 text-white"
className="h-8 gap-2 bg-red-600/20 hover:bg-red-600/30 border-red-600/50 text-red-400"
>
<X className="h-4 w-4" />
<span className="hidden sm:inline">Close</span>
@@ -738,7 +919,7 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
)}
{(isMobile || isTablet) && (
<div className="flex flex-wrap gap-1.5 justify-center items-center px-1 bg-zinc-900 text-sm rounded-b-md border-t border-zinc-700 py-1.5">
<div className="flex gap-1.5 justify-center items-center px-1 bg-zinc-900 text-sm rounded-b-md border-t border-zinc-700 py-1.5">
<Button
onPointerDown={(e) => {
e.preventDefault()
@@ -819,22 +1000,38 @@ export const TerminalPanel: React.FC<TerminalPanelProps> = ({ websocketUrl, onCl
}}
variant="outline"
size="sm"
className="h-8 px-3 text-xs"
className="h-8 px-2 text-xs bg-blue-600/20 hover:bg-blue-600/30 border-blue-600/50 text-blue-400"
>
</Button>
<Button
onPointerDown={(e) => {
e.preventDefault()
e.stopPropagation()
sendSequence("\x03", e)
}}
variant="outline"
size="sm"
className="h-8 px-2 text-xs"
>
CTRL+C
Enter
</Button>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="outline"
size="sm"
className="h-8 px-2 text-xs gap-1 bg-transparent"
>
Ctrl
<ChevronDown className="h-3 w-3" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="w-48">
<DropdownMenuLabel className="text-xs text-muted-foreground">Control Sequences</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuItem onSelect={() => sendSequence("\x03")}>
<span className="font-mono text-xs mr-2">Ctrl+C</span>
<span className="text-muted-foreground text-xs">Cancel/Interrupt</span>
</DropdownMenuItem>
<DropdownMenuItem onSelect={() => sendSequence("\x18")}>
<span className="font-mono text-xs mr-2">Ctrl+X</span>
<span className="text-muted-foreground text-xs">Exit (nano)</span>
</DropdownMenuItem>
<DropdownMenuItem onSelect={() => sendSequence("\x12")}>
<span className="font-mono text-xs mr-2">Ctrl+R</span>
<span className="text-muted-foreground text-xs">Search history</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
)}
+38 -2
View File
@@ -89,8 +89,44 @@ export function TwoFactorSetup({ open, onClose, onSuccess }: TwoFactorSetupProps
}
}
const copyToClipboard = (text: string, type: "secret" | "codes") => {
navigator.clipboard.writeText(text)
const copyToClipboard = async (text: string, type: "secret" | "codes") => {
let ok = false
// Preferred path (HTTPS / localhost). On plain HTTP the Promise rejects,
// so we catch and fall through to the textarea fallback.
try {
if (navigator.clipboard && window.isSecureContext) {
await navigator.clipboard.writeText(text)
ok = true
}
} catch {
// fall through to execCommand fallback
}
if (!ok) {
try {
const textarea = document.createElement("textarea")
textarea.value = text
textarea.style.position = "fixed"
textarea.style.left = "-9999px"
textarea.style.top = "-9999px"
textarea.style.opacity = "0"
textarea.readOnly = true
document.body.appendChild(textarea)
textarea.focus()
textarea.select()
ok = document.execCommand("copy")
document.body.removeChild(textarea)
} catch {
ok = false
}
}
if (!ok) {
console.error("Failed to copy to clipboard")
return
}
if (type === "secret") {
setCopiedSecret(true)
setTimeout(() => setCopiedSecret(false), 2000)
+257
View File
@@ -0,0 +1,257 @@
'use client'
import * as React from 'react'
import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu'
import { CheckIcon, ChevronRightIcon, CircleIcon } from 'lucide-react'
import { cn } from '@/lib/utils'
// Root container for a dropdown menu. Forwards all props straight to the
// Radix primitive and tags it with a `data-slot` attribute for CSS hooks.
function DropdownMenu(props: React.ComponentProps<typeof DropdownMenuPrimitive.Root>) {
  return <DropdownMenuPrimitive.Root data-slot="dropdown-menu" {...props} />
}
// Portal wrapper so menu content can render outside the DOM hierarchy of
// its trigger; passthrough to the Radix primitive plus a `data-slot` tag.
function DropdownMenuPortal(props: React.ComponentProps<typeof DropdownMenuPrimitive.Portal>) {
  return <DropdownMenuPrimitive.Portal data-slot="dropdown-menu-portal" {...props} />
}
function DropdownMenuTrigger({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Trigger>) {
return (
<DropdownMenuPrimitive.Trigger
data-slot="dropdown-menu-trigger"
{...props}
/>
)
}
function DropdownMenuContent({
className,
sideOffset = 4,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Content>) {
return (
<DropdownMenuPrimitive.Portal>
<DropdownMenuPrimitive.Content
data-slot="dropdown-menu-content"
sideOffset={sideOffset}
className={cn(
'bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 max-h-[var(--radix-dropdown-menu-content-available-height)] min-w-[8rem] origin-[var(--radix-dropdown-menu-content-transform-origin)] overflow-x-hidden overflow-y-auto rounded-md border p-1 shadow-md',
className,
)}
{...props}
/>
</DropdownMenuPrimitive.Portal>
)
}
function DropdownMenuGroup({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Group>) {
return (
<DropdownMenuPrimitive.Group data-slot="dropdown-menu-group" {...props} />
)
}
/**
 * Single selectable menu entry.
 *
 * @param inset   when true, left-pads the item to align with items that have
 *                a leading indicator/icon (`data-[inset]:pl-8`).
 * @param variant `'destructive'` switches text/focus styling to the
 *                destructive palette via `data-variant` selectors.
 */
const DropdownMenuItem = ({
  className,
  inset,
  variant = 'default',
  ...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Item> & {
  inset?: boolean
  variant?: 'default' | 'destructive'
}) => {
  const itemClasses =
    "focus:bg-accent focus:text-accent-foreground data-[variant=destructive]:text-destructive data-[variant=destructive]:focus:bg-destructive/10 dark:data-[variant=destructive]:focus:bg-destructive/20 data-[variant=destructive]:focus:text-destructive data-[variant=destructive]:*:[svg]:!text-destructive [&_svg:not([class*='text-'])]:text-muted-foreground relative flex cursor-default items-center gap-2 rounded-sm px-2 py-1.5 text-sm outline-none select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 data-[inset]:pl-8 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4"
  return (
    <DropdownMenuPrimitive.Item
      data-slot="dropdown-menu-item"
      data-inset={inset}
      data-variant={variant}
      className={cn(itemClasses, className)}
      {...props}
    />
  )
}
/**
 * Checkable menu entry. The check mark renders inside a Radix
 * `ItemIndicator`, which only mounts while `checked` is truthy.
 */
const DropdownMenuCheckboxItem = ({
  className,
  children,
  checked,
  ...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.CheckboxItem>) => {
  const itemClasses =
    "focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-none select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4"
  return (
    <DropdownMenuPrimitive.CheckboxItem
      data-slot="dropdown-menu-checkbox-item"
      checked={checked}
      className={cn(itemClasses, className)}
      {...props}
    >
      {/* Fixed-position indicator slot; pl-8 above reserves room for it. */}
      <span className="pointer-events-none absolute left-2 flex size-3.5 items-center justify-center">
        <DropdownMenuPrimitive.ItemIndicator>
          <CheckIcon className="size-4" />
        </DropdownMenuPrimitive.ItemIndicator>
      </span>
      {children}
    </DropdownMenuPrimitive.CheckboxItem>
  )
}

/**
 * Container that scopes a set of mutually exclusive radio items.
 */
const DropdownMenuRadioGroup = (
  props: React.ComponentProps<typeof DropdownMenuPrimitive.RadioGroup>,
) => (
  <DropdownMenuPrimitive.RadioGroup
    data-slot="dropdown-menu-radio-group"
    {...props}
  />
)
/**
 * One option inside a `DropdownMenuRadioGroup`; shows a filled dot while
 * selected (Radix `ItemIndicator`).
 */
const DropdownMenuRadioItem = ({
  className,
  children,
  ...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.RadioItem>) => {
  const itemClasses =
    "focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-none select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4"
  return (
    <DropdownMenuPrimitive.RadioItem
      data-slot="dropdown-menu-radio-item"
      className={cn(itemClasses, className)}
      {...props}
    >
      {/* Selection dot; pl-8 in the base classes reserves its space. */}
      <span className="pointer-events-none absolute left-2 flex size-3.5 items-center justify-center">
        <DropdownMenuPrimitive.ItemIndicator>
          <CircleIcon className="size-2 fill-current" />
        </DropdownMenuPrimitive.ItemIndicator>
      </span>
      {children}
    </DropdownMenuPrimitive.RadioItem>
  )
}
/**
 * Non-interactive heading for a group of items.
 *
 * @param inset left-pads the label to align with indented items.
 */
const DropdownMenuLabel = ({
  className,
  inset,
  ...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Label> & {
  inset?: boolean
}) => (
  <DropdownMenuPrimitive.Label
    data-slot="dropdown-menu-label"
    data-inset={inset}
    className={cn('px-2 py-1.5 text-sm font-medium data-[inset]:pl-8', className)}
    {...props}
  />
)

/**
 * Thin horizontal rule between menu sections.
 */
const DropdownMenuSeparator = ({
  className,
  ...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Separator>) => (
  <DropdownMenuPrimitive.Separator
    data-slot="dropdown-menu-separator"
    className={cn('bg-border -mx-1 my-1 h-px', className)}
    {...props}
  />
)

/**
 * Right-aligned hint text for an item's keyboard shortcut. Plain `<span>` —
 * purely presentational, does not register the shortcut.
 */
const DropdownMenuShortcut = ({
  className,
  ...props
}: React.ComponentProps<'span'>) => (
  <span
    data-slot="dropdown-menu-shortcut"
    className={cn('text-muted-foreground ml-auto text-xs tracking-widest', className)}
    {...props}
  />
)
function DropdownMenuSub({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Sub>) {
return <DropdownMenuPrimitive.Sub data-slot="dropdown-menu-sub" {...props} />
}
function DropdownMenuSubTrigger({
className,
inset,
children,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.SubTrigger> & {
inset?: boolean
}) {
return (
<DropdownMenuPrimitive.SubTrigger
data-slot="dropdown-menu-sub-trigger"
data-inset={inset}
className={cn(
'focus:bg-accent focus:text-accent-foreground data-[state=open]:bg-accent data-[state=open]:text-accent-foreground flex cursor-default items-center rounded-sm px-2 py-1.5 text-sm outline-none select-none data-[inset]:pl-8',
className,
)}
{...props}
>
{children}
<ChevronRightIcon className="ml-auto size-4" />
</DropdownMenuPrimitive.SubTrigger>
)
}
function DropdownMenuSubContent({
className,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.SubContent>) {
return (
<DropdownMenuPrimitive.SubContent
data-slot="dropdown-menu-sub-content"
className={cn(
'bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 min-w-[8rem] origin-[var(--radix-dropdown-menu-content-transform-origin)] overflow-hidden rounded-md border p-1 shadow-lg',
className,
)}
{...props}
/>
)
}
// Public surface of this module: shadcn-style wrappers over the Radix
// dropdown-menu primitives, exported individually (named exports only).
export {
  DropdownMenu,
  DropdownMenuPortal,
  DropdownMenuTrigger,
  DropdownMenuContent,
  DropdownMenuGroup,
  DropdownMenuLabel,
  DropdownMenuItem,
  DropdownMenuCheckboxItem,
  DropdownMenuRadioGroup,
  DropdownMenuRadioItem,
  DropdownMenuSeparator,
  DropdownMenuShortcut,
  DropdownMenuSub,
  DropdownMenuSubTrigger,
  DropdownMenuSubContent,
}
+29
View File
@@ -0,0 +1,29 @@
"use client"
import * as React from "react"
import * as SwitchPrimitives from "@radix-ui/react-switch"
import { cn } from "@/lib/utils"
const Switch = React.forwardRef<
React.ElementRef<typeof SwitchPrimitives.Root>,
React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>
>(({ className, ...props }, ref) => (
<SwitchPrimitives.Root
className={cn(
"peer inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 focus-visible:ring-offset-background disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=unchecked]:bg-input",
className
)}
{...props}
ref={ref}
>
<SwitchPrimitives.Thumb
className={cn(
"pointer-events-none block h-4 w-4 rounded-full bg-white shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-4 data-[state=unchecked]:translate-x-0"
)}
/>
</SwitchPrimitives.Root>
))
Switch.displayName = SwitchPrimitives.Root.displayName
export { Switch }
+24
View File
@@ -0,0 +1,24 @@
import * as React from "react"
import { cn } from "@/lib/utils"
/** Props for {@link Textarea}; currently identical to the native attributes. */
export interface TextareaProps
  extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}

/**
 * Styled multi-line text input. Forwards the ref to the native
 * `<textarea>`; extra classes are merged after the base styles so callers
 * can override them.
 */
const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
  function TextareaInner({ className, ...props }, ref) {
    const baseClasses =
      "flex min-h-[80px] w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50"
    return (
      <textarea
        className={cn(baseClasses, className)}
        ref={ref}
        {...props}
      />
    )
  }
)
Textarea.displayName = "Textarea"
export { Textarea }
+604 -50
View File
@@ -7,10 +7,15 @@ import { Card, CardContent, CardHeader, CardTitle } from "./ui/card"
import { Badge } from "./ui/badge"
import { Progress } from "./ui/progress"
import { Button } from "./ui/button"
import { Dialog, DialogContent, DialogHeader, DialogTitle } from "./ui/dialog"
import { Server, Play, Square, Cpu, MemoryStick, HardDrive, Network, Power, RotateCcw, StopCircle, Container, ChevronDown, ChevronUp } from 'lucide-react'
import { Dialog, DialogContent, DialogHeader, DialogTitle, DialogFooter, DialogDescription } from "./ui/dialog"
import { Server, Play, Square, Cpu, MemoryStick, HardDrive, Network, Power, RotateCcw, StopCircle, Container, ChevronDown, ChevronUp, Terminal, Archive, Plus, Loader2, Clock, Database, Shield, Bell, FileText, Settings2, Activity } from 'lucide-react'
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
import { Checkbox } from "./ui/checkbox"
import { Textarea } from "./ui/textarea"
import { Label } from "./ui/label"
import useSWR from "swr"
import { MetricsView } from "./metrics-dialog"
import { LxcTerminalModal } from "./lxc-terminal-modal"
import { formatStorage } from "../lib/utils"
import { formatNetworkTraffic, getNetworkUnit } from "../lib/format-network"
import { fetchApi } from "../lib/api-config"
@@ -120,6 +125,29 @@ interface VMDetails extends VMData {
}
}
interface BackupStorage {
storage: string
type: string
content: string
total: number
used: number
avail: number
total_human?: string
used_human?: string
avail_human?: string
}
interface VMBackup {
volid: string
storage: string
type: string
size: number
size_human: string
timestamp: number
date: string
notes?: string
}
const fetcher = async (url: string) => {
return fetchApi(url)
}
@@ -196,6 +224,28 @@ const getUsageColor = (percent: number): string => {
return "text-foreground"
}
// Generate consistent color for storage names
// Fixed palette of badge color combos; a storage name hashes to one entry,
// so the same storage always renders with the same colors.
const storageColors = [
  { bg: "bg-blue-500/20", text: "text-blue-400", border: "border-blue-500/30" },
  { bg: "bg-emerald-500/20", text: "text-emerald-400", border: "border-emerald-500/30" },
  { bg: "bg-purple-500/20", text: "text-purple-400", border: "border-purple-500/30" },
  { bg: "bg-amber-500/20", text: "text-amber-400", border: "border-amber-500/30" },
  { bg: "bg-pink-500/20", text: "text-pink-400", border: "border-pink-500/30" },
  { bg: "bg-cyan-500/20", text: "text-cyan-400", border: "border-cyan-500/30" },
  { bg: "bg-rose-500/20", text: "text-rose-400", border: "border-rose-500/30" },
  { bg: "bg-indigo-500/20", text: "text-indigo-400", border: "border-indigo-500/30" },
]

/**
 * Deterministically map a storage name to a palette entry.
 * Folds UTF-16 code units with the classic `h*31 + c` recurrence
 * (expressed as `(h << 5) - h`), then indexes the palette by |hash| mod size.
 */
const getStorageColor = (storageName: string) => {
  const hash = storageName
    .split("")
    .reduce((acc, ch) => ch.charCodeAt(0) + ((acc << 5) - acc), 0)
  return storageColors[Math.abs(hash) % storageColors.length]
}
const getIconColor = (percent: number): string => {
if (percent >= 95) return "text-red-500"
if (percent >= 86) return "text-orange-500"
@@ -245,10 +295,10 @@ export function VirtualMachines() {
isLoading,
mutate,
} = useSWR<VMData[]>("/api/vms", fetcher, {
refreshInterval: 23000,
revalidateOnFocus: false,
refreshInterval: 2500,
revalidateOnFocus: true,
revalidateOnReconnect: true,
dedupingInterval: 10000,
dedupingInterval: 1000,
errorRetryCount: 2,
})
@@ -256,6 +306,9 @@ export function VirtualMachines() {
const [vmDetails, setVMDetails] = useState<VMDetails | null>(null)
const [controlLoading, setControlLoading] = useState(false)
const [detailsLoading, setDetailsLoading] = useState(false)
const [terminalOpen, setTerminalOpen] = useState(false)
const [terminalVmid, setTerminalVmid] = useState<number | null>(null)
const [terminalVmName, setTerminalVmName] = useState<string>("")
const [vmConfigs, setVmConfigs] = useState<Record<number, string>>({})
const [currentView, setCurrentView] = useState<"main" | "metrics">("main")
const [showAdditionalInfo, setShowAdditionalInfo] = useState(false)
@@ -267,6 +320,40 @@ export function VirtualMachines() {
const [ipsLoaded, setIpsLoaded] = useState(false)
const [loadingIPs, setLoadingIPs] = useState(false)
const [networkUnit, setNetworkUnit] = useState<"Bytes" | "Bits">("Bytes")
// Backup states
const [vmBackups, setVmBackups] = useState<VMBackup[]>([])
const [backupStorages, setBackupStorages] = useState<BackupStorage[]>([])
const [selectedBackupStorage, setSelectedBackupStorage] = useState<string>("")
const [loadingBackups, setLoadingBackups] = useState(false)
const [creatingBackup, setCreatingBackup] = useState(false)
// Backup modal states
const [showBackupModal, setShowBackupModal] = useState(false)
const [backupMode, setBackupMode] = useState<string>("snapshot")
const [backupProtected, setBackupProtected] = useState(false)
const [backupNotification, setBackupNotification] = useState<string>("auto")
const [backupNotes, setBackupNotes] = useState<string>("{{guestname}}")
const [backupPbsChangeMode, setBackupPbsChangeMode] = useState<string>("default")
// Tab state for modal
const [activeModalTab, setActiveModalTab] = useState<"status" | "backups">("status")
// Detect standalone mode (webapp vs browser)
const [isStandalone, setIsStandalone] = useState(false)
useEffect(() => {
const checkStandalone = () => {
const standalone = window.matchMedia('(display-mode: standalone)').matches ||
(window.navigator as Navigator & { standalone?: boolean }).standalone === true
setIsStandalone(standalone)
}
checkStandalone()
const mediaQuery = window.matchMedia('(display-mode: standalone)')
mediaQuery.addEventListener('change', checkStandalone)
return () => mediaQuery.removeEventListener('change', checkStandalone)
}, [])
useEffect(() => {
const fetchLXCIPs = async () => {
@@ -336,6 +423,16 @@ export function VirtualMachines() {
}
}, [])
// Keep the open modal's VM in sync with the /api/vms poll so CPU/RAM/I-O values
// don't stay frozen at click-time. Single data source (/cluster/resources) shared
// with the list — no source mismatch, no flicker.
useEffect(() => {
if (!selectedVM || !vmData) return
const updated = vmData.find((v) => v.vmid === selectedVM.vmid)
if (!updated || updated === selectedVM) return
setSelectedVM(updated)
}, [vmData])
const handleVMClick = async (vm: VMData) => {
setSelectedVM(vm)
setCurrentView("main")
@@ -344,6 +441,11 @@ export function VirtualMachines() {
setIsEditingNotes(false)
setEditedNotes("")
setDetailsLoading(true)
// Load backups immediately (independent of config)
fetchBackupStorages()
fetchVmBackups(vm.vmid)
try {
const details = await fetchApi(`/api/vms/${vm.vmid}`)
setVMDetails(details)
@@ -362,6 +464,77 @@ export function VirtualMachines() {
setCurrentView("main")
}
// Backup functions
// Load the list of backup-capable storages and, when nothing is selected
// yet, default the selector to the first storage returned. Errors are
// logged and swallowed (the backups tab simply shows no storages).
const fetchBackupStorages = async () => {
  try {
    const { storages } = await fetchApi("/api/backup-storages")
    if (!storages) return
    setBackupStorages(storages)
    // Auto-pick the first storage so "Create Backup" works immediately.
    if (storages.length > 0 && !selectedBackupStorage) {
      setSelectedBackupStorage(storages[0].storage)
    }
  } catch (error) {
    console.error("Error fetching backup storages:", error)
  }
}
// Fetch the backup list for one guest, driving the loading spinner while
// the request is in flight. On failure the list is cleared rather than
// left stale.
const fetchVmBackups = async (vmid: number) => {
  setLoadingBackups(true)
  try {
    const response = await fetchApi(`/api/vms/${vmid}/backups`)
    if (response.backups) {
      setVmBackups(response.backups)
    }
  } catch (error) {
    console.error("Error fetching VM backups:", error)
    setVmBackups([])
  } finally {
    // Always stop the spinner, success or failure.
    setLoadingBackups(false)
  }
}
// Open the "create backup" dialog with every option reset to its default,
// so a previous run's choices never leak into a new backup.
const openBackupModal = () => {
  setBackupMode("snapshot")
  setBackupProtected(false)
  setBackupNotification("auto")
  setBackupNotes("{{guestname}}")
  setBackupPbsChangeMode("default")
  // Fall back to the first known storage when the user hasn't picked one.
  if (backupStorages.length > 0 && !selectedBackupStorage) {
    setSelectedBackupStorage(backupStorages[0].storage)
  }
  setShowBackupModal(true)
}
const handleCreateBackup = async () => {
if (!selectedVM || !selectedBackupStorage) return
setCreatingBackup(true)
setShowBackupModal(false)
try {
await fetchApi(`/api/vms/${selectedVM.vmid}/backup`, {
method: "POST",
body: JSON.stringify({
storage: selectedBackupStorage,
mode: backupMode,
compress: "zstd",
protected: backupProtected,
notification: backupNotification,
notes: backupNotes,
pbs_change_detection: backupPbsChangeMode
}),
})
setTimeout(() => fetchVmBackups(selectedVM.vmid), 2000)
} catch (error) {
console.error("Error creating backup:", error)
} finally {
setCreatingBackup(false)
}
}
const handleVMControl = async (vmid: number, action: string) => {
setControlLoading(true)
try {
@@ -380,7 +553,14 @@ export function VirtualMachines() {
}
}
const handleDownloadLogs = async (vmid: number, vmName: string) => {
// Open the in-app terminal modal for an LXC container: record which guest
// it targets (vmid + display name) before flipping the modal open, so the
// terminal mounts against the right container.
const openLxcTerminal = (vmid: number, vmName: string) => {
  setTerminalVmid(vmid)
  setTerminalVmName(vmName)
  setTerminalOpen(true)
}
const handleDownloadLogs = async (vmid: number, vmName: string) => {
try {
const data = await fetchApi(`/api/vms/${vmid}/logs`)
@@ -451,12 +631,21 @@ export function VirtualMachines() {
}
}
const safeVMData = vmData || []
// Ensure vmData is always an array (backend may return object on error)
const safeVMData = Array.isArray(vmData) ? vmData : []
// Total allocated RAM for ALL VMs/LXCs (running + stopped)
const totalAllocatedMemoryGB = useMemo(() => {
return (safeVMData.reduce((sum, vm) => sum + (vm.maxmem || 0), 0) / 1024 ** 3).toFixed(1)
}, [safeVMData])
// Allocated RAM only for RUNNING VMs/LXCs (this is what actually matters for overcommit)
const runningAllocatedMemoryGB = useMemo(() => {
return (safeVMData
.filter((vm) => vm.status === "running")
.reduce((sum, vm) => sum + (vm.maxmem || 0), 0) / 1024 ** 3).toFixed(1)
}, [safeVMData])
const { data: systemData } = useSWR<{ memory_total: number; memory_used: number; memory_usage: number }>(
"/api/system",
fetcher,
@@ -470,7 +659,9 @@ export function VirtualMachines() {
const usedMemoryGB = systemData?.memory_used ?? null
const memoryUsagePercent = systemData?.memory_usage ?? null
const allocatedMemoryGB = Number.parseFloat(totalAllocatedMemoryGB)
const isMemoryOvercommit = physicalMemoryGB !== null && allocatedMemoryGB > physicalMemoryGB
const runningAllocatedGB = Number.parseFloat(runningAllocatedMemoryGB)
// Overcommit warning should be based on RUNNING VMs allocation, not total
const isMemoryOvercommit = physicalMemoryGB !== null && runningAllocatedGB > physicalMemoryGB
const getMemoryUsageColor = (percent: number | null) => {
if (percent === null) return "bg-blue-500"
@@ -490,8 +681,13 @@ export function VirtualMachines() {
if (isLoading) {
return (
<div className="space-y-6">
<div className="text-center py-8 text-muted-foreground">Loading virtual machines...</div>
<div className="flex flex-col items-center justify-center min-h-[400px] gap-4">
<div className="relative">
<div className="h-12 w-12 rounded-full border-2 border-muted"></div>
<div className="absolute inset-0 h-12 w-12 rounded-full border-2 border-transparent border-t-primary animate-spin"></div>
</div>
<div className="text-sm font-medium text-foreground">Loading virtual machines...</div>
<p className="text-xs text-muted-foreground">Fetching VM and LXC container status</p>
</div>
)
}
@@ -758,13 +954,21 @@ export function VirtualMachines() {
</div>
)}
{/* Allocated RAM (configured) */}
{/* Allocated RAM (configured) - Split into Running and Total */}
<div className="pt-3 border-t border-border">
{/* Layout para desktop (sin cambios) */}
{/* Layout para desktop */}
<div className="hidden lg:flex items-center justify-between">
<div>
<div className="text-lg font-semibold text-foreground">{totalAllocatedMemoryGB} GB</div>
<div className="text-xs text-muted-foreground">Allocated RAM</div>
<div className="flex gap-6">
{/* Running allocation - most important */}
<div>
<div className="text-lg font-semibold text-foreground">{runningAllocatedMemoryGB} GB</div>
<div className="text-xs text-muted-foreground">Running Allocated</div>
</div>
{/* Total allocation */}
<div>
<div className="text-lg font-semibold text-muted-foreground">{totalAllocatedMemoryGB} GB</div>
<div className="text-xs text-muted-foreground">Total Allocated</div>
</div>
</div>
{physicalMemoryGB !== null && (
<div>
@@ -781,10 +985,20 @@ export function VirtualMachines() {
)}
</div>
{/* Layout para móvil (44.0 GB solo, Allocated RAM en otra línea, badge en tercera línea) */}
<div className="lg:hidden space-y-1">
<div className="text-lg font-semibold text-foreground">{totalAllocatedMemoryGB} GB</div>
<div className="text-xs text-muted-foreground">Allocated RAM</div>
{/* Layout para movil */}
<div className="lg:hidden space-y-2">
<div className="flex gap-4">
{/* Running allocation */}
<div>
<div className="text-lg font-semibold text-foreground">{runningAllocatedMemoryGB} GB</div>
<div className="text-xs text-muted-foreground">Running</div>
</div>
{/* Total allocation */}
<div>
<div className="text-lg font-semibold text-muted-foreground">{totalAllocatedMemoryGB} GB</div>
<div className="text-xs text-muted-foreground">Total</div>
</div>
</div>
{physicalMemoryGB !== null && (
<div>
{isMemoryOvercommit ? (
@@ -1042,10 +1256,15 @@ export function VirtualMachines() {
setShowNotes(false)
setIsEditingNotes(false)
setEditedNotes("")
setActiveModalTab("status")
}}
>
<DialogContent
className="max-w-4xl h-[95vh] sm:h-[90vh] flex flex-col p-0 overflow-hidden"
className={`max-w-4xl flex flex-col p-0 overflow-hidden ${
isStandalone
? "h-[95vh] sm:h-[90vh]"
: "h-[85vh] sm:h-[85vh] max-h-[calc(100dvh-env(safe-area-inset-top)-env(safe-area-inset-bottom)-40px)]"
}`}
key={selectedVM?.vmid || "no-vm"}
>
{currentView === "main" ? (
@@ -1105,8 +1324,39 @@ export function VirtualMachines() {
</DialogTitle>
</DialogHeader>
<div className="flex-1 overflow-y-auto px-6 py-4">
<div className="space-y-6">
{/* Tab Navigation */}
<div className="flex border-b border-border px-6 shrink-0">
<button
onClick={() => setActiveModalTab("status")}
className={`flex items-center gap-2 px-4 py-2.5 text-sm font-medium transition-colors border-b-2 -mb-px ${
activeModalTab === "status"
? "border-cyan-500 text-cyan-500"
: "border-transparent text-muted-foreground hover:text-foreground"
}`}
>
<Activity className="h-4 w-4" />
Status
</button>
<button
onClick={() => setActiveModalTab("backups")}
className={`flex items-center gap-2 px-4 py-2.5 text-sm font-medium transition-colors border-b-2 -mb-px ${
activeModalTab === "backups"
? "border-amber-500 text-amber-500"
: "border-transparent text-muted-foreground hover:text-foreground"
}`}
>
<Archive className="h-4 w-4" />
Backups
{vmBackups.length > 0 && (
<Badge variant="secondary" className="text-xs h-5 ml-1">{vmBackups.length}</Badge>
)}
</button>
</div>
<div className="flex-1 overflow-y-auto px-6 py-4 min-h-0">
{/* Status Tab */}
{activeModalTab === "status" && (
<div className="space-y-4">
{selectedVM && (
<>
<div key={`metrics-${selectedVM.vmid}`}>
@@ -1118,7 +1368,13 @@ export function VirtualMachines() {
<div className="grid grid-cols-2 lg:grid-cols-3 gap-4">
{/* CPU Usage */}
<div>
<div className="text-xs text-muted-foreground mb-2">CPU Usage</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<Cpu className="h-3.5 w-3.5" />
<span>CPU Usage</span>
{vmDetails?.config?.cores && (
<span className="text-muted-foreground/60">({vmDetails.config.cores} cores)</span>
)}
</div>
<div className={`text-base font-semibold mb-2 ${getUsageColor(selectedVM.cpu * 100)}`}>
{(selectedVM.cpu * 100).toFixed(1)}%
</div>
@@ -1130,7 +1386,10 @@ export function VirtualMachines() {
{/* Memory */}
<div>
<div className="text-xs text-muted-foreground mb-2">Memory</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<MemoryStick className="h-3.5 w-3.5" />
<span>Memory</span>
</div>
<div
className={`text-base font-semibold mb-2 ${getUsageColor((selectedVM.mem / selectedVM.maxmem) * 100)}`}
>
@@ -1145,7 +1404,10 @@ export function VirtualMachines() {
{/* Disk */}
<div>
<div className="text-xs text-muted-foreground mb-2">Disk</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<HardDrive className="h-3.5 w-3.5" />
<span>Disk</span>
</div>
<div
className={`text-base font-semibold mb-2 ${getUsageColor((selectedVM.disk / selectedVM.maxdisk) * 100)}`}
>
@@ -1160,7 +1422,10 @@ export function VirtualMachines() {
{/* Disk I/O */}
<div>
<div className="text-xs text-muted-foreground mb-2">Disk I/O</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<HardDrive className="h-3.5 w-3.5" />
<span>Disk I/O</span>
</div>
<div className="space-y-1">
<div className="text-sm text-green-500 flex items-center gap-1">
<span></span>
@@ -1175,7 +1440,10 @@ export function VirtualMachines() {
{/* Network I/O */}
<div>
<div className="text-xs text-muted-foreground mb-2">Network I/O</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<Network className="h-3.5 w-3.5" />
<span>Network I/O</span>
</div>
<div className="space-y-1">
<div className="text-sm text-green-500 flex items-center gap-1">
<span></span>
@@ -1203,9 +1471,12 @@ export function VirtualMachines() {
<Card className="border border-border bg-card/50" key={`config-${selectedVM.vmid}`}>
<CardContent className="p-4">
<div className="flex items-center justify-between mb-4">
<h3 className="text-sm font-semibold text-muted-foreground uppercase tracking-wide">
Resources
</h3>
<div className="flex items-center gap-2">
<div className="p-1.5 rounded-md bg-blue-500/10">
<Cpu className="h-4 w-4 text-blue-500" />
</div>
<h3 className="text-sm font-semibold text-foreground">Resources</h3>
</div>
<div className="flex gap-2">
<Button
variant="outline"
@@ -1249,19 +1520,28 @@ export function VirtualMachines() {
<div className="grid grid-cols-3 lg:grid-cols-4 gap-3 lg:gap-4">
{vmDetails.config.cores && (
<div>
<div className="text-xs text-muted-foreground mb-1">CPU Cores</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-1">
<Cpu className="h-3.5 w-3.5" />
<span>CPU Cores</span>
</div>
<div className="font-semibold text-blue-500">{vmDetails.config.cores}</div>
</div>
)}
{vmDetails.config.memory && (
<div>
<div className="text-xs text-muted-foreground mb-1">Memory</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-1">
<MemoryStick className="h-3.5 w-3.5" />
<span>Memory</span>
</div>
<div className="font-semibold text-blue-500">{vmDetails.config.memory} MB</div>
</div>
)}
{vmDetails.config.swap && (
{vmDetails.config.swap !== undefined && (
<div>
<div className="text-xs text-muted-foreground mb-1">Swap</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-1">
<RotateCcw className="h-3.5 w-3.5" />
<span>Swap</span>
</div>
<div className="font-semibold text-foreground">{vmDetails.config.swap} MB</div>
</div>
)}
@@ -1270,7 +1550,8 @@ export function VirtualMachines() {
{/* IP Addresses with proper keys */}
{selectedVM?.type === "lxc" && vmDetails?.lxc_ip_info && (
<div className="mt-4 lg:mt-6 pt-4 lg:pt-6 border-t border-border">
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Network className="h-4 w-4" />
IP Addresses
</h4>
<div className="flex flex-wrap gap-2">
@@ -1373,7 +1654,8 @@ export function VirtualMachines() {
<div className="mt-6 pt-6 border-t border-border space-y-6">
{selectedVM?.type === "lxc" && vmDetails?.hardware_info && (
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Container className="h-4 w-4" />
Container Configuration
</h4>
<div className="space-y-4">
@@ -1381,7 +1663,10 @@ export function VirtualMachines() {
{vmDetails.hardware_info.privileged !== null &&
vmDetails.hardware_info.privileged !== undefined && (
<div>
<div className="text-xs text-muted-foreground mb-2">Privilege Level</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<Shield className="h-3.5 w-3.5" />
<span>Privilege Level</span>
</div>
<Badge
variant="outline"
className={
@@ -1399,7 +1684,10 @@ export function VirtualMachines() {
{vmDetails.hardware_info.gpu_passthrough &&
vmDetails.hardware_info.gpu_passthrough.length > 0 && (
<div>
<div className="text-xs text-muted-foreground mb-2">GPU Passthrough</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<Cpu className="h-3.5 w-3.5" />
<span>GPU Passthrough</span>
</div>
<div className="flex flex-wrap gap-2">
{vmDetails.hardware_info.gpu_passthrough.map((gpu, index) => (
<Badge
@@ -1422,7 +1710,10 @@ export function VirtualMachines() {
{vmDetails.hardware_info.devices &&
vmDetails.hardware_info.devices.length > 0 && (
<div>
<div className="text-xs text-muted-foreground mb-2">Hardware Devices</div>
<div className="flex items-center gap-1.5 text-xs text-muted-foreground mb-2">
<Server className="h-3.5 w-3.5" />
<span>Hardware Devices</span>
</div>
<div className="flex flex-wrap gap-2">
{vmDetails.hardware_info.devices.map((device, index) => (
<Badge
@@ -1442,7 +1733,8 @@ export function VirtualMachines() {
{/* Hardware Section */}
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Settings2 className="h-4 w-4" />
Hardware
</h4>
<div className="grid grid-cols-2 lg:grid-cols-3 gap-4">
@@ -1543,7 +1835,8 @@ export function VirtualMachines() {
{/* Storage Section */}
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<HardDrive className="h-4 w-4" />
Storage
</h4>
<div className="space-y-3">
@@ -1608,7 +1901,8 @@ export function VirtualMachines() {
{/* Network Section */}
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Network className="h-4 w-4" />
Network
</h4>
<div className="space-y-3">
@@ -1657,7 +1951,8 @@ export function VirtualMachines() {
{/* PCI Devices with proper keys */}
{Object.keys(vmDetails.config).some((key) => key.match(/^hostpci\d+$/)) && (
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Cpu className="h-4 w-4" />
PCI Passthrough
</h4>
<div className="space-y-3">
@@ -1680,7 +1975,8 @@ export function VirtualMachines() {
{/* USB Devices with proper keys */}
{Object.keys(vmDetails.config).some((key) => key.match(/^usb\d+$/)) && (
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Server className="h-4 w-4" />
USB Devices
</h4>
<div className="space-y-3">
@@ -1703,7 +1999,8 @@ export function VirtualMachines() {
{/* Serial Ports with proper keys */}
{Object.keys(vmDetails.config).some((key) => key.match(/^serial\d+$/)) && (
<div>
<h4 className="text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<h4 className="flex items-center gap-2 text-sm font-semibold text-muted-foreground mb-3 uppercase tracking-wide">
<Terminal className="h-4 w-4" />
Serial Ports
</h4>
<div className="space-y-3">
@@ -1731,12 +2028,102 @@ export function VirtualMachines() {
</>
)}
</div>
)}
{/* Backups Tab */}
{activeModalTab === "backups" && (
<div className="space-y-4">
<Card className="border border-border bg-card/50">
<CardContent className="p-4">
<div className="flex items-center justify-between mb-4">
<div className="flex items-center gap-2">
<div className="p-1.5 rounded-md bg-amber-500/10">
<Archive className="h-4 w-4 text-amber-500" />
</div>
<h3 className="text-sm font-semibold text-foreground">Backups</h3>
</div>
<Button
size="sm"
className="h-7 text-xs bg-amber-600/20 border border-amber-600/50 text-amber-400 hover:bg-amber-600/30 gap-1"
onClick={openBackupModal}
disabled={creatingBackup}
>
{creatingBackup ? (
<Loader2 className="h-3 w-3 animate-spin" />
) : (
<Plus className="h-3 w-3" />
)}
<span>Create Backup</span>
</Button>
</div>
{/* Divider */}
<div className="border-t border-border/50 mb-4" />
{/* Backup List */}
<div className="flex items-center justify-between mb-3">
<span className="text-xs text-muted-foreground">Available backups</span>
<Badge variant="secondary" className="text-xs h-5">{vmBackups.length}</Badge>
</div>
{loadingBackups ? (
<div className="flex items-center justify-center py-6 text-muted-foreground">
<Loader2 className="h-4 w-4 animate-spin mr-2" />
<span className="text-sm">Loading backups...</span>
</div>
) : vmBackups.length === 0 ? (
<div className="flex flex-col items-center justify-center py-8 text-muted-foreground">
<Archive className="h-12 w-12 mb-3 opacity-30" />
<span className="text-sm">No backups found</span>
<span className="text-xs mt-1">Create your first backup using the button above</span>
</div>
) : (
<div className="space-y-2">
{vmBackups.map((backup, index) => (
<div
key={`backup-${backup.volid}-${index}`}
className="flex items-center justify-between p-3 rounded-lg bg-muted/30 hover:bg-muted/50 transition-colors"
>
<div className="flex items-center gap-2 flex-1 min-w-0">
<div className="w-2 h-2 rounded-full bg-green-500 flex-shrink-0" />
<Clock className="h-4 w-4 text-muted-foreground flex-shrink-0" />
<span className="text-sm text-foreground">{backup.date}</span>
<Badge
variant="outline"
className={`text-xs ml-auto flex-shrink-0 ${getStorageColor(backup.storage).bg} ${getStorageColor(backup.storage).text} ${getStorageColor(backup.storage).border}`}
>
{backup.storage}
</Badge>
</div>
<Badge variant="outline" className="text-xs font-mono ml-2 flex-shrink-0">
{backup.size_human}
</Badge>
</div>
))}
</div>
)}
</CardContent>
</Card>
</div>
)}
</div>
<div className="border-t border-border bg-background px-6 py-4 mt-auto">
<div className="border-t border-border bg-background px-6 py-4 mt-auto shrink-0">
{/* Terminal button for LXC containers - only when running */}
{selectedVM?.type === "lxc" && selectedVM?.status === "running" && (
<div className="mb-3">
<Button
className="w-full bg-zinc-600/20 border border-zinc-600/50 text-zinc-300 hover:bg-zinc-600/30"
onClick={() => selectedVM && openLxcTerminal(selectedVM.vmid, selectedVM.name)}
>
<Terminal className="h-4 w-4 mr-2" />
Open Terminal
</Button>
</div>
)}
<div className="grid grid-cols-2 gap-3">
<Button
className="w-full bg-green-600 hover:bg-green-700 text-white"
className="w-full bg-green-600/20 border border-green-600/50 text-green-400 hover:bg-green-600/30"
disabled={selectedVM?.status === "running" || controlLoading}
onClick={() => selectedVM && handleVMControl(selectedVM.vmid, "start")}
>
@@ -1744,7 +2131,7 @@ export function VirtualMachines() {
Start
</Button>
<Button
className="w-full bg-blue-600 hover:bg-blue-700 text-white"
className="w-full bg-blue-600/20 border border-blue-600/50 text-blue-400 hover:bg-blue-600/30"
disabled={selectedVM?.status !== "running" || controlLoading}
onClick={() => selectedVM && handleVMControl(selectedVM.vmid, "shutdown")}
>
@@ -1752,7 +2139,7 @@ export function VirtualMachines() {
Shutdown
</Button>
<Button
className="w-full bg-blue-600 hover:bg-blue-700 text-white"
className="w-full bg-blue-600/20 border border-blue-600/50 text-blue-400 hover:bg-blue-600/30"
disabled={selectedVM?.status !== "running" || controlLoading}
onClick={() => selectedVM && handleVMControl(selectedVM.vmid, "reboot")}
>
@@ -1760,7 +2147,7 @@ export function VirtualMachines() {
Reboot
</Button>
<Button
className="w-full bg-blue-600 hover:bg-blue-700 text-white"
className="w-full bg-red-600/20 border border-red-600/50 text-red-400 hover:bg-red-600/30"
disabled={selectedVM?.status !== "running" || controlLoading}
onClick={() => selectedVM && handleVMControl(selectedVM.vmid, "stop")}
>
@@ -1782,6 +2169,173 @@ export function VirtualMachines() {
)}
</DialogContent>
</Dialog>
{/* Backup Configuration Modal */}
<Dialog open={showBackupModal} onOpenChange={setShowBackupModal}>
<DialogContent className="sm:max-w-[500px]">
<DialogHeader>
<DialogTitle className="flex items-center gap-2 text-amber-500">
<Archive className="h-5 w-5" />
Backup {selectedVM?.type?.toUpperCase()} {selectedVM?.vmid} ({selectedVM?.name})
</DialogTitle>
<DialogDescription>
Configure backup options for this {selectedVM?.type === 'lxc' ? 'container' : 'virtual machine'}
</DialogDescription>
</DialogHeader>
<div className="grid gap-4 py-4">
{/* Storage & Mode Row */}
<div className="grid grid-cols-2 gap-4">
<div className="space-y-2">
<Label className="text-sm flex items-center gap-1.5">
<Database className="h-3.5 w-3.5" />
Storage
</Label>
<Select value={selectedBackupStorage} onValueChange={setSelectedBackupStorage}>
<SelectTrigger>
<SelectValue placeholder="Select storage" />
</SelectTrigger>
<SelectContent>
{backupStorages.map((storage) => (
<SelectItem key={`modal-storage-${storage.storage}`} value={storage.storage}>
{storage.storage} ({storage.avail_human} free)
</SelectItem>
))}
</SelectContent>
</Select>
</div>
<div className="space-y-2">
<Label className="text-sm flex items-center gap-1.5">
<Settings2 className="h-3.5 w-3.5" />
Mode
</Label>
<Select value={backupMode} onValueChange={setBackupMode}>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="snapshot">Snapshot</SelectItem>
<SelectItem value="suspend">Suspend</SelectItem>
<SelectItem value="stop">Stop</SelectItem>
</SelectContent>
</Select>
</div>
</div>
{/* Notification Row */}
<div className="space-y-2">
<Label className="text-sm flex items-center gap-1.5">
<Bell className="h-3.5 w-3.5" />
Notification
</Label>
<Select value={backupNotification} onValueChange={setBackupNotification}>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="auto">Use global settings</SelectItem>
<SelectItem value="always">Always notify</SelectItem>
<SelectItem value="failure">Notify on failure</SelectItem>
<SelectItem value="never">Never notify</SelectItem>
</SelectContent>
</Select>
</div>
{/* Protected Checkbox */}
<div className="flex items-center space-x-2">
<Checkbox
id="backup-protected"
checked={backupProtected}
onCheckedChange={(checked) => setBackupProtected(checked === true)}
/>
<Label htmlFor="backup-protected" className="text-sm flex items-center gap-1.5 cursor-pointer">
<Shield className="h-3.5 w-3.5" />
Protected (prevent accidental deletion)
</Label>
</div>
{/* PBS Change Detection Mode (only for LXC) */}
{selectedVM?.type === 'lxc' && (
<div className="space-y-2">
<Label className="text-sm flex items-center gap-1.5">
<Settings2 className="h-3.5 w-3.5" />
PBS change detection mode
<span className="text-xs text-muted-foreground ml-1">(for PBS storage)</span>
</Label>
<Select value={backupPbsChangeMode} onValueChange={setBackupPbsChangeMode}>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="default">Default</SelectItem>
<SelectItem value="legacy">Legacy</SelectItem>
<SelectItem value="data">Data</SelectItem>
</SelectContent>
</Select>
</div>
)}
{/* Notes */}
<div className="space-y-2">
<Label className="text-sm flex items-center gap-1.5">
<FileText className="h-3.5 w-3.5" />
Notes
</Label>
<Textarea
value={backupNotes}
onChange={(e) => setBackupNotes(e.target.value)}
placeholder="{{guestname}}"
className="min-h-[80px] resize-none"
/>
<p className="text-xs text-muted-foreground">
{'Variables: {{cluster}}, {{guestname}}, {{node}}, {{vmid}}'}
</p>
</div>
</div>
<div className="flex items-center gap-3 pt-4">
<Button
variant="outline"
onClick={() => setShowBackupModal(false)}
className="flex-1 bg-zinc-800/50 border-zinc-700 text-zinc-300 hover:bg-zinc-700/50"
>
Cancel
</Button>
<Button
onClick={handleCreateBackup}
disabled={creatingBackup || !selectedBackupStorage}
className="flex-1 bg-amber-600/20 border border-amber-600/50 text-amber-400 hover:bg-amber-600/30"
>
{creatingBackup ? (
<>
<Loader2 className="h-4 w-4 animate-spin mr-2" />
Creating...
</>
) : (
<>
<Archive className="h-4 w-4 mr-2" />
Backup
</>
)}
</Button>
</div>
</DialogContent>
</Dialog>
{/* LXC Terminal Modal */}
{terminalVmid !== null && (
<LxcTerminalModal
open={terminalOpen}
onClose={() => {
setTerminalOpen(false)
setTerminalVmid(null)
setTerminalVmName("")
}}
vmid={terminalVmid}
vmName={terminalVmName}
/>
)}
</div>
)
}
+66
View File
@@ -0,0 +1,66 @@
{
"_description": "Verified AI models for ProxMenux notifications. Only models listed here will be shown to users. Models are tested to work with the chat/completions API format.",
"_updated": "2026-03-20",
"groq": {
"models": [
"llama-3.3-70b-versatile",
"llama-3.1-70b-versatile",
"llama-3.1-8b-instant",
"llama3-70b-8192",
"llama3-8b-8192",
"mixtral-8x7b-32768",
"gemma2-9b-it"
],
"recommended": "llama-3.3-70b-versatile"
},
"gemini": {
"models": [
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
"gemini-2.5-pro"
],
"recommended": "gemini-2.5-flash",
"_note": "gemini-2.5-flash-lite is cheaper but may struggle with complex prompts. Use with simple/custom prompts.",
"_deprecated": ["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemini-1.5-flash", "gemini-1.0-pro", "gemini-pro"]
},
"openai": {
"models": [
"gpt-4.1-mini",
"gpt-4o-mini"
],
"recommended": "gpt-4o-mini"
},
"anthropic": {
"models": [
"claude-3-5-haiku-latest",
"claude-3-5-sonnet-latest",
"claude-3-opus-latest"
],
"recommended": "claude-3-5-haiku-latest"
},
"openrouter": {
"models": [
"meta-llama/llama-3.3-70b-instruct",
"meta-llama/llama-3.1-70b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"anthropic/claude-3.5-haiku",
"anthropic/claude-3.5-sonnet",
      "google/gemini-2.5-flash-lite",
"openai/gpt-4o-mini",
"mistralai/mistral-7b-instruct",
"mistralai/mixtral-8x7b-instruct"
],
"recommended": "meta-llama/llama-3.3-70b-instruct"
},
"ollama": {
"_note": "Ollama models are local, we don't filter them. User manages their own models.",
"models": [],
"recommended": ""
}
}
+21 -33
View File
@@ -19,29 +19,19 @@ export const API_PORT = process.env.NEXT_PUBLIC_API_PORT || "8008"
*/
export function getApiBaseUrl(): string {
if (typeof window === "undefined") {
console.log("[v0] getApiBaseUrl: Running on server (SSR)")
return ""
}
const { protocol, hostname, port } = window.location
console.log("[v0] getApiBaseUrl - protocol:", protocol, "hostname:", hostname, "port:", port)
// If accessing via standard ports (80/443) or no port, assume we're behind a proxy
// In this case, use relative URLs so the proxy handles routing
const isStandardPort = port === "" || port === "80" || port === "443"
console.log("[v0] getApiBaseUrl - isStandardPort:", isStandardPort)
if (isStandardPort) {
// Behind a proxy - use relative URL
console.log("[v0] getApiBaseUrl: Detected proxy access, using relative URLs")
return ""
} else {
// Direct access - use explicit API port
const baseUrl = `${protocol}//${hostname}:${API_PORT}`
console.log("[v0] getApiBaseUrl: Direct access detected, using:", baseUrl)
return baseUrl
return `${protocol}//${hostname}:${API_PORT}`
}
}
@@ -69,12 +59,7 @@ export function getAuthToken(): string | null {
if (typeof window === "undefined") {
return null
}
const token = localStorage.getItem("proxmenux-auth-token")
console.log(
"[v0] getAuthToken called:",
token ? `Token found (length: ${token.length})` : "No token found in localStorage",
)
return token
return localStorage.getItem("proxmenux-auth-token")
}
/**
@@ -96,19 +81,13 @@ export async function fetchApi<T>(endpoint: string, options?: RequestInit): Prom
if (token) {
headers["Authorization"] = `Bearer ${token}`
console.log("[v0] fetchApi:", endpoint, "- Authorization header ADDED")
} else {
console.log("[v0] fetchApi:", endpoint, "- NO TOKEN - Request will fail if endpoint is protected")
}
try {
const response = await fetch(url, {
...options,
headers,
cache: "no-store",
})
console.log("[v0] fetchApi:", endpoint, "- Response status:", response.status)
const response = await fetch(url, {
...options,
headers,
cache: "no-store",
})
if (!response.ok) {
if (response.status === 401) {
@@ -118,9 +97,18 @@ export async function fetchApi<T>(endpoint: string, options?: RequestInit): Prom
throw new Error(`API request failed: ${response.status} ${response.statusText}`)
}
return response.json()
} catch (error) {
console.error("[v0] fetchApi error for", endpoint, ":", error)
throw error
}
// Check content type to ensure we're getting JSON
const contentType = response.headers.get("content-type")
if (!contentType || !contentType.includes("application/json")) {
const text = await response.text()
console.error("[v0] fetchApi: Expected JSON but got:", contentType, "- Body preview:", text.substring(0, 200))
throw new Error(`Expected JSON response but got ${contentType || "unknown content type"}`)
}
try {
return await response.json()
} catch (jsonError) {
console.error("[v0] fetchApi: JSON parse error for", endpoint, "-", jsonError)
throw new Error(`Invalid JSON response from ${endpoint}`)
}
}
+1 -1
View File
@@ -1,6 +1,6 @@
{
"name": "ProxMenux-Monitor",
"version": "1.0.2",
"version": "1.2.0",
"description": "Proxmox System Monitoring Dashboard",
"private": true,
"scripts": {
@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="Layer_2" data-name="Layer 2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 200.18 69.76">
<g id="Layer_1-2" data-name="Layer 1">
<path d="M114.26.13c-13.19,0-23.88,10.68-23.88,23.88s10.68,23.9,23.88,23.9,23.88-10.68,23.88-23.88h0c-.02-13.19-10.71-23.88-23.88-23.9ZM114.26,38.94c-8.24,0-14.93-6.69-14.93-14.93s6.69-14.93,14.93-14.93,14.93,6.69,14.93,14.93c-.02,8.24-6.71,14.93-14.93,14.93h0Z"/>
<path d="M24.11,0C10.92-.11.13,10.47,0,23.66c-.13,13.19,10.47,23.98,23.66,24.11h8.31v-8.94h-7.86c-8.24.11-15-6.5-15.1-14.74-.11-8.24,6.5-15,14.74-15.1h.34c8.22,0,14.95,6.69,14.95,14.93h0v21.98h0c0,8.18-6.65,14.83-14.81,14.93-3.91-.04-7.63-1.59-10.39-4.38l-6.33,6.31c4.4,4.42,10.34,6.92,16.57,6.99h.32c13.02-.19,23.49-10.75,23.56-23.77v-22.69C47.65,10.35,37.05.02,24.11,0Z"/>
<path d="M191.28,68.74V23.43c-.32-12.96-10.92-23.28-23.88-23.3-13.19-.13-23.98,10.47-24.11,23.66-.13,13.19,10.49,23.98,23.68,24.11h8.31v-8.94h-7.86c-8.24.11-15-6.5-15.1-14.74s6.5-15,14.74-15.1h.34c8.22,0,14.95,6.69,14.95,14.93h0v44.63h0l8.92.06Z"/>
<path d="M54.8,47.9h8.92v-23.88c0-8.24,6.69-14.93,14.93-14.93,2.72,0,5.25.72,7.46,2l4.48-7.75c-3.5-2.02-7.58-3.19-11.92-3.19-13.19,0-23.88,10.68-23.88,23.88v23.88Z"/>
<path d="M198.01.74c.68.38,1.21.91,1.59,1.59.38.68.57,1.42.57,2.25s-.19,1.57-.59,2.27c-.4.68-.93,1.23-1.61,1.61-.68.4-1.44.59-2.25.59s-1.57-.19-2.25-.59c-.68-.4-1.21-.93-1.59-1.61-.38-.68-.59-1.42-.59-2.25s.19-1.57.59-2.25c.38-.68.93-1.21,1.61-1.61s1.44-.59,2.27-.59c.83,0,1.57.19,2.25.59ZM197.57,7.75c.55-.32.98-.76,1.3-1.32.32-.55.47-1.17.47-1.85s-.15-1.3-.47-1.85-.74-.98-1.27-1.3c-.55-.32-1.17-.47-1.85-.47s-1.3.17-1.85.49c-.55.32-.98.76-1.3,1.32s-.47,1.17-.47,1.85.15,1.3.47,1.85c.32.55.74,1,1.27,1.32.55.32,1.15.49,1.83.49.7-.04,1.32-.21,1.87-.53ZM197.84,4.82c-.15.25-.38.45-.68.59l1.06,1.64h-1.32l-.91-1.42h-.87v1.42h-1.32V2.17h2.12c.66,0,1.19.15,1.57.47.38.32.57.74.57,1.27,0,.34-.08.66-.23.91ZM195.85,4.65c.3,0,.53-.06.68-.19.17-.13.25-.32.25-.55s-.08-.42-.25-.57-.4-.19-.68-.19h-.74v1.53h.74v-.02Z"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2.0 KiB

@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="Layer_2" data-name="Layer 2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 200.18 69.76">
<defs>
<style>
.cls-1 {
fill: #fff;
}
</style>
</defs>
<g id="Layer_1-2" data-name="Layer 1">
<path class="cls-1" d="M114.26.13c-13.19,0-23.88,10.68-23.88,23.88s10.68,23.9,23.88,23.9,23.88-10.68,23.88-23.88h0c-.02-13.19-10.71-23.88-23.88-23.9ZM114.26,38.94c-8.24,0-14.93-6.69-14.93-14.93s6.69-14.93,14.93-14.93,14.93,6.69,14.93,14.93c-.02,8.24-6.71,14.93-14.93,14.93h0Z"/>
<path class="cls-1" d="M24.11,0C10.92-.11.13,10.47,0,23.66c-.13,13.19,10.47,23.98,23.66,24.11h8.31v-8.94h-7.86c-8.24.11-15-6.5-15.1-14.74-.11-8.24,6.5-15,14.74-15.1h.34c8.22,0,14.95,6.69,14.95,14.93h0v21.98h0c0,8.18-6.65,14.83-14.81,14.93-3.91-.04-7.63-1.59-10.39-4.38l-6.33,6.31c4.4,4.42,10.34,6.92,16.57,6.99h.32c13.02-.19,23.49-10.75,23.56-23.77v-22.69C47.65,10.35,37.05.02,24.11,0Z"/>
<path class="cls-1" d="M191.28,68.74V23.43c-.32-12.96-10.92-23.28-23.88-23.3-13.19-.13-23.98,10.47-24.11,23.66-.13,13.19,10.49,23.98,23.68,24.11h8.31v-8.94h-7.86c-8.24.11-15-6.5-15.1-14.74s6.5-15,14.74-15.1h.34c8.22,0,14.95,6.69,14.95,14.93h0v44.63h0l8.92.06Z"/>
<path class="cls-1" d="M54.8,47.9h8.92v-23.88c0-8.24,6.69-14.93,14.93-14.93,2.72,0,5.25.72,7.46,2l4.48-7.75c-3.5-2.02-7.58-3.19-11.92-3.19-13.19,0-23.88,10.68-23.88,23.88v23.88Z"/>
<path class="cls-1" d="M198.01.74c.68.38,1.21.91,1.59,1.59.38.68.57,1.42.57,2.25s-.19,1.57-.59,2.27c-.4.68-.93,1.23-1.61,1.61-.68.4-1.44.59-2.25.59s-1.57-.19-2.25-.59c-.68-.4-1.21-.93-1.59-1.61-.38-.68-.59-1.42-.59-2.25s.19-1.57.59-2.25c.38-.68.93-1.21,1.61-1.61s1.44-.59,2.27-.59c.83,0,1.57.19,2.25.59ZM197.57,7.75c.55-.32.98-.76,1.3-1.32.32-.55.47-1.17.47-1.85s-.15-1.3-.47-1.85-.74-.98-1.27-1.3c-.55-.32-1.17-.47-1.85-.47s-1.3.17-1.85.49c-.55.32-.98.76-1.3,1.32s-.47,1.17-.47,1.85.15,1.3.47,1.85c.32.55.74,1,1.27,1.32.55.32,1.15.49,1.83.49.7-.04,1.32-.21,1.87-.53ZM197.84,4.82c-.15.25-.38.45-.68.59l1.06,1.64h-1.32l-.91-1.42h-.87v1.42h-1.32V2.17h2.12c.66,0,1.19.15,1.57.47.38.32.57.74.57,1.27,0,.34-.08.66-.23.91ZM195.85,4.65c.3,0,.53-.06.68-.19.17-.13.25-.32.25-.55s-.08-.42-.25-.57-.4-.19-.68-.19h-.74v1.53h.74v-.02Z"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

+390
View File
@@ -0,0 +1,390 @@
#!/usr/bin/env python3
"""
AI Context Enrichment Module
Enriches notification context with additional information to help AI provide
more accurate and helpful responses:
1. Event frequency - how often this error has occurred
2. System uptime - helps distinguish startup issues from runtime failures
3. SMART disk data - for disk-related errors
4. Known error matching - from proxmox_known_errors database
Author: MacRimi
"""
import os
import re
import subprocess
from datetime import datetime, timedelta
from typing import Optional, Dict, Any
import sqlite3
from pathlib import Path
# Import known errors database; fall back to no-op stubs so this module
# still loads (and simply skips known-error enrichment) when the
# proxmox_known_errors module is not installed alongside it.
try:
    from proxmox_known_errors import get_error_context, find_matching_error
except ImportError:
    def get_error_context(*args, **kwargs):
        # Stub: no known-error database available -> contribute no context.
        return None
    def find_matching_error(*args, **kwargs):
        # Stub: known-error matching disabled without the database module.
        return None
# SQLite database used for event-frequency lookups (see
# get_event_frequency). Presumably written by the health monitor
# service -- confirm against the writer of this file.
DB_PATH = Path('/usr/local/share/proxmenux/health_monitor.db')
def get_system_uptime() -> str:
    """Report system uptime as a human-readable string.

    Reads /proc/uptime and formats the elapsed time, appending a short
    interpretation hint (just booted / recently booted / stable system)
    that helps distinguish startup issues from runtime failures.

    Returns:
        String like "2 minutes (recently booted)" or
        "89 days, 4 hours (stable system)"; "unknown" on any failure.
    """
    try:
        with open('/proc/uptime', 'r') as fh:
            total_seconds = float(fh.readline().split()[0])
        day_count = int(total_seconds // 86400)
        hour_count = int((total_seconds % 86400) // 3600)
        minute_count = int((total_seconds % 3600) // 60)
        # Show days and hours when present; minutes appear only when
        # the uptime is under one hour.
        segments = []
        if day_count > 0:
            segments.append(f"{day_count} day{'s' if day_count != 1 else ''}")
        if hour_count > 0:
            segments.append(f"{hour_count} hour{'s' if hour_count != 1 else ''}")
        if not segments:  # Less than an hour of uptime
            segments.append(f"{minute_count} minute{'s' if minute_count != 1 else ''}")
        readable = ", ".join(segments)
        # Attach an interpretation hint (thresholds: 10 min, 1 h, 30 d).
        if total_seconds < 600:
            return f"{readable} (just booted - likely startup issue)"
        if total_seconds < 3600:
            return f"{readable} (recently booted)"
        if day_count >= 30:
            return f"{readable} (stable system)"
        return readable
    except Exception:
        return "unknown"
def get_event_frequency(error_id: Optional[str] = None, error_key: Optional[str] = None,
                        category: Optional[str] = None, hours: int = 24) -> Optional[Dict[str, Any]]:
    """Get frequency information for an error from the health-monitor DB.

    Exactly one lookup key is used, in priority order
    error_id > error_key > category; with none given, returns None.

    Args:
        error_id: Specific error ID to look up (matched against both the
            error_key and error_id columns)
        error_key: Alternative error key
        category: Error category (only unresolved rows are considered)
        hours: Time window to check (default 24h).
            NOTE(review): accepted but never applied to any query below,
            so results are NOT limited to this window - confirm whether
            filtering by last_seen was intended.
    Returns:
        Dict with 'occurrences' and 'category', plus 'first_seen_ago'
        and 'pattern' when derivable; None if the DB is missing, no row
        matches, or the query fails.
    """
    # No database yet (e.g. monitor never ran) -> nothing to report.
    if not DB_PATH.exists():
        return None
    try:
        conn = sqlite3.connect(str(DB_PATH), timeout=5)
        cursor = conn.cursor()
        # Try to find the error (most recent matching row only)
        if error_id:
            cursor.execute('''
                SELECT first_seen, last_seen, occurrences, category
                FROM errors WHERE error_key = ? OR error_id = ?
                ORDER BY last_seen DESC LIMIT 1
            ''', (error_id, error_id))
        elif error_key:
            cursor.execute('''
                SELECT first_seen, last_seen, occurrences, category
                FROM errors WHERE error_key = ?
                ORDER BY last_seen DESC LIMIT 1
            ''', (error_key,))
        elif category:
            cursor.execute('''
                SELECT first_seen, last_seen, occurrences, category
                FROM errors WHERE category = ? AND resolved_at IS NULL
                ORDER BY last_seen DESC LIMIT 1
            ''', (category,))
        else:
            conn.close()
            return None
        row = cursor.fetchone()
        conn.close()
        if not row:
            return None
        first_seen, last_seen, occurrences, cat = row
        # Calculate age; timestamps are assumed ISO-8601 strings
        # (datetime.fromisoformat) -- TODO confirm against the writer.
        try:
            first_dt = datetime.fromisoformat(first_seen) if first_seen else None
            last_dt = datetime.fromisoformat(last_seen) if last_seen else None
            now = datetime.now()
            result = {
                'occurrences': occurrences or 1,
                'category': cat
            }
            if first_dt:
                age = now - first_dt
                # Coarse human-readable age: minutes < 1h, hours < 1d, else days.
                if age.total_seconds() < 3600:
                    result['first_seen_ago'] = f"{int(age.total_seconds() / 60)} minutes ago"
                elif age.total_seconds() < 86400:
                    result['first_seen_ago'] = f"{int(age.total_seconds() / 3600)} hours ago"
                else:
                    result['first_seen_ago'] = f"{age.days} days ago"
            if last_dt and first_dt and occurrences and occurrences > 1:
                # Calculate average interval between occurrences over the
                # observed span (span / (n - 1) gaps).
                span = (last_dt - first_dt).total_seconds()
                if span > 0 and occurrences > 1:
                    avg_interval = span / (occurrences - 1)
                    if avg_interval < 60:
                        result['pattern'] = f"recurring every ~{int(avg_interval)} seconds"
                    elif avg_interval < 3600:
                        result['pattern'] = f"recurring every ~{int(avg_interval / 60)} minutes"
                    else:
                        result['pattern'] = f"recurring every ~{int(avg_interval / 3600)} hours"
            return result
        except (ValueError, TypeError):
            # Unparseable timestamps: still report the raw count.
            return {'occurrences': occurrences or 1, 'category': cat}
    except Exception as e:
        # Best-effort enrichment: log and degrade to "no frequency info".
        print(f"[AIContext] Error getting frequency: {e}")
        return None
def get_smart_data(disk_device: str) -> Optional[str]:
    """Summarize SMART health for a disk via smartctl.

    Args:
        disk_device: Device path like /dev/sda or a bare name like sda

    Returns:
        Short summary (overall health plus any non-zero critical
        attributes), or None when the device is missing, smartctl is
        unavailable, or the query fails/times out.
    """
    if not disk_device:
        return None
    # Accept bare names ("sda") as well as full paths ("/dev/sda").
    device_path = disk_device if disk_device.startswith('/dev/') else f'/dev/{disk_device}'
    if not os.path.exists(device_path):
        return None
    try:
        # Overall health verdict (-H).
        health_proc = subprocess.run(
            ['smartctl', '-H', device_path],
            capture_output=True, text=True, timeout=10
        )
        if "PASSED" in health_proc.stdout:
            verdict = "PASSED"
        elif "FAILED" in health_proc.stdout:
            verdict = "FAILED"
        else:
            verdict = "UNKNOWN"
        # Vendor attribute table (-A), scanned for critical counters.
        attr_proc = subprocess.run(
            ['smartctl', '-A', device_path],
            capture_output=True, text=True, timeout=10
        )
        watched_attrs = [
            'Reallocated_Sector_Ct', 'Current_Pending_Sector',
            'Offline_Uncorrectable', 'UDMA_CRC_Error_Count',
            'Reallocated_Event_Count', 'Reported_Uncorrect'
        ]
        found = {}
        for row in attr_proc.stdout.split('\n'):
            for attr_name in watched_attrs:
                if attr_name in row:
                    columns = row.split()
                    # Typical format: ID ATTRIBUTE_NAME FLAGS VALUE WORST THRESH TYPE UPDATED RAW_VALUE
                    if len(columns) >= 10:
                        found[attr_name] = columns[-1]
        # Build summary: health line plus any attribute with a non-zero
        # raw value (non-numeric raw values are skipped).
        summary = [f"SMART Health: {verdict}"]
        for attr_name, raw_value in found.items():
            try:
                if int(raw_value) > 0:
                    summary.append(f"  {attr_name}: {raw_value}")
            except ValueError:
                pass
        if len(summary) > 1 or verdict == "FAILED":
            return "\n".join(summary)
        return f"SMART Health: {verdict}"
    except subprocess.TimeoutExpired:
        return None
    except FileNotFoundError:
        # smartctl not installed
        return None
    except Exception:
        return None
def extract_disk_device(text: str) -> Optional[str]:
    """Pull a disk device name (e.g. 'sda', 'nvme0n1p2') out of error text.

    Args:
        text: Error message or log content to scan

    Returns:
        First matching device name, or None when nothing matches.
    """
    if not text:
        return None
    # Ordered from most to least specific; the first hit wins.
    device_patterns = (
        r'/dev/(sd[a-z]\d*)',
        r'/dev/(nvme\d+n\d+(?:p\d+)?)',
        r'/dev/(hd[a-z]\d*)',
        r'/dev/(vd[a-z]\d*)',
        r'\b(sd[a-z])\b',
        r'disk[_\s]+(sd[a-z])',
        r'ata\d+\.\d+: (sd[a-z])',
    )
    for device_pattern in device_patterns:
        hit = re.search(device_pattern, text, re.IGNORECASE)
        if hit:
            return hit.group(1)
    return None
def enrich_context_for_ai(
    title: str,
    body: str,
    event_type: str,
    data: Dict[str, Any],
    journal_context: str = '',
    detail_level: str = 'standard'
) -> str:
    """Assemble an enriched context string for AI processing.

    Combines, in order: system uptime (critical system events only),
    event frequency from the local DB, SMART data (disk-related events
    only), known-error matching, and finally the raw journal logs.

    Args:
        title: Notification title
        body: Notification body
        event_type: Type of event
        data: Event data dict
        journal_context: Original journal log context
        detail_level: Level of detail (minimal, standard, detailed)

    Returns:
        Enriched context string (falls back to the journal context, or
        an empty string, when nothing could be added).
    """
    sections = []
    combined_text = f"{title} {body} {journal_context}"
    # 1. Uptime is only useful for critical system-level failures
    #    (crash/panic/OOM/cluster trouble); for disk errors, warnings
    #    or routine operations it is just noise.
    critical_markers = [
        'crash', 'panic', 'oom', 'kernel',
        'split_brain', 'quorum_lost', 'node_offline', 'node_fail',
        'system_fail', 'boot_fail'
    ]
    lowered_type = event_type.lower()
    if any(marker in lowered_type for marker in critical_markers):
        uptime = get_system_uptime()
        if uptime and uptime != "unknown":
            sections.append(f"System uptime: {uptime}")
    # 2. How often this error has been seen before.
    error_key = data.get('error_key') or data.get('error_id')
    category = data.get('category')
    freq = get_event_frequency(error_id=error_key, category=category)
    if freq:
        freq_line = f"Event frequency: {freq.get('occurrences', 1)} occurrence(s)"
        if freq.get('first_seen_ago'):
            freq_line += f", first seen {freq['first_seen_ago']}"
        if freq.get('pattern'):
            freq_line += f", {freq['pattern']}"
        sections.append(freq_line)
    # 3. SMART data, when either the event type or the message text
    #    looks disk-related and a device name can be extracted.
    disk_related = any(x in lowered_type for x in ['disk', 'smart', 'storage', 'io_error']) or \
        any(x in combined_text.lower() for x in ['disk', 'smart', '/dev/sd', 'ata', 'i/o error'])
    if disk_related:
        device_name = extract_disk_device(combined_text)
        if device_name:
            smart_summary = get_smart_data(device_name)
            if smart_summary:
                sections.append(smart_summary)
    # 4. Match against the known-errors database, if available.
    known_error_ctx = get_error_context(combined_text, category=category, detail_level=detail_level)
    if known_error_ctx:
        sections.append(known_error_ctx)
    # 5. Raw journal logs always go last.
    if journal_context:
        sections.append(f"Journal logs:\n{journal_context}")
    if sections:
        return "\n\n".join(sections)
    return journal_context or ""
def get_enriched_context(
    event: 'NotificationEvent',
    detail_level: str = 'standard'
) -> str:
    """Enrich AI context directly from a NotificationEvent.

    Thin wrapper that unpacks the event's data dict and delegates to
    enrich_context_for_ai().

    Args:
        event: NotificationEvent object
        detail_level: Level of detail

    Returns:
        Enriched context string
    """
    payload = event.data
    return enrich_context_for_ai(
        title=payload.get('title', ''),
        # Fall back from 'body' to 'message' when the former is absent.
        body=payload.get('body', payload.get('message', '')),
        event_type=event.event_type,
        data=payload,
        journal_context=payload.get('_journal_context', ''),
        detail_level=detail_level,
    )
+106
View File
@@ -0,0 +1,106 @@
"""AI Providers for ProxMenux notification enhancement.
This module provides a pluggable architecture for different AI providers
to enhance and translate notification messages.
Supported providers:
- Groq: Fast inference, generous free tier (30 req/min)
- OpenAI: Industry standard, widely used
- Anthropic: Excellent for text generation, Claude Haiku is fast and affordable
- Gemini: Google's model, free tier available, good quality/price ratio
- Ollama: 100% local execution, no costs, complete privacy
- OpenRouter: Aggregator with access to 100+ models using a single API key
"""
from .base import AIProvider, AIProviderError
from .groq_provider import GroqProvider
from .openai_provider import OpenAIProvider
from .anthropic_provider import AnthropicProvider
from .gemini_provider import GeminiProvider
from .ollama_provider import OllamaProvider
from .openrouter_provider import OpenRouterProvider
# Registry mapping canonical provider name -> implementation class.
# Keys here are the names accepted by get_provider().
PROVIDERS = {
    'groq': GroqProvider,
    'openai': OpenAIProvider,
    'anthropic': AnthropicProvider,
    'gemini': GeminiProvider,
    'ollama': OllamaProvider,
    'openrouter': OpenRouterProvider,
}
# Provider metadata for UI display (served by get_provider_info).
# Note: No hardcoded models - users load models dynamically from each provider
PROVIDER_INFO = {
    'groq': {
        'name': 'Groq',
        'description': 'Fast inference, generous free tier (30 req/min). Ideal to get started.',
        'requires_api_key': True,
    },
    'openai': {
        'name': 'OpenAI',
        'description': 'Industry standard. Very accurate and widely used.',
        'requires_api_key': True,
    },
    'anthropic': {
        'name': 'Anthropic (Claude)',
        'description': 'Excellent for writing and translation. Fast and affordable.',
        'requires_api_key': True,
    },
    'gemini': {
        'name': 'Google Gemini',
        'description': 'Free tier available, very good quality/price ratio.',
        'requires_api_key': True,
    },
    'ollama': {
        'name': 'Ollama (Local)',
        'description': '100% local execution. No costs, complete privacy, no internet required.',
        'requires_api_key': False,
    },
    'openrouter': {
        'name': 'OpenRouter',
        'description': 'Aggregator with access to 100+ models using a single API key. Maximum flexibility.',
        'requires_api_key': True,
    },
}
def get_provider(name: str, **kwargs) -> AIProvider:
    """Factory: instantiate the AI provider registered under ``name``.

    Args:
        name: Provider name (groq, openai, anthropic, gemini, ollama, openrouter)
        **kwargs: Provider-specific arguments (api_key, model, base_url)

    Returns:
        AIProvider instance

    Raises:
        AIProviderError: If provider name is unknown
    """
    provider_cls = PROVIDERS.get(name)
    if provider_cls is None:
        raise AIProviderError(f"Unknown provider: {name}. Available: {list(PROVIDERS.keys())}")
    return provider_cls(**kwargs)
def get_provider_info(name: str = None) -> dict:
    """Look up UI metadata for one provider, or for all of them.

    Args:
        name: Optional provider name. If None, returns all providers info.

    Returns:
        Provider info dict ({} for unknown names) or dict of all providers
    """
    return PROVIDER_INFO.get(name, {}) if name else PROVIDER_INFO
# Public API of this package.
__all__ = [
    'AIProvider',
    'AIProviderError',
    'PROVIDERS',
    'PROVIDER_INFO',
    'get_provider',
    'get_provider_info',
]
@@ -0,0 +1,80 @@
"""Anthropic (Claude) provider implementation.
Anthropic's Claude models are excellent for text generation and translation.
Models use "-latest" aliases that auto-update to newest versions.
"""
from typing import Optional, List
from .base import AIProvider, AIProviderError
class AnthropicProvider(AIProvider):
    """Anthropic provider using their Messages API."""

    NAME = "anthropic"
    REQUIRES_API_KEY = True
    API_URL = "https://api.anthropic.com/v1/messages"
    API_VERSION = "2023-06-01"

    # Known stable model aliases (Anthropic doesn't have a public models list API)
    # These use "-latest" which auto-updates to the newest version
    KNOWN_MODELS = [
        "claude-3-5-haiku-latest",
        "claude-3-5-sonnet-latest",
        "claude-3-opus-latest",
    ]

    def list_models(self) -> List[str]:
        """Return known Anthropic model aliases.

        Anthropic doesn't have a public models list API, but their "-latest"
        aliases auto-update to the newest versions, making them reliable choices.
        """
        return self.KNOWN_MODELS

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Anthropic's API.

        Note: Anthropic uses a different API format than OpenAI.
        The system prompt goes in a separate top-level field, not in messages.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Anthropic")

        # Anthropic uses a different format - system is a top-level field
        payload = {
            'model': self.model,
            'system': system_prompt,
            'messages': [
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
        }
        headers = {
            'Content-Type': 'application/json',
            'x-api-key': self.api_key,
            'anthropic-version': self.API_VERSION,
        }
        result = self._make_request(self.API_URL, payload, headers)
        try:
            # Anthropic returns content as an array of content blocks.
            # Scan for the first block that actually carries text rather than
            # blindly reading content[0]: the first block may be a non-text
            # block (e.g. "thinking"/"tool_use"), which previously yielded an
            # empty string.
            content = result['content']
            if isinstance(content, list) and len(content) > 0:
                for block in content:
                    if isinstance(block, dict) and block.get('text'):
                        return block['text'].strip()
                # No text-bearing block found - fall back to prior behavior
                return content[0].get('text', '').strip()
            return str(content).strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")
+177
View File
@@ -0,0 +1,177 @@
"""Base class for AI providers."""
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, List
class AIProviderError(Exception):
    """Raised for any AI provider failure: missing credentials, HTTP or
    connection errors, and unexpected response formats."""
    pass
class AIProvider(ABC):
    """Abstract base class for AI providers.

    All provider implementations must inherit from this class and implement
    the generate() method. list_models() and test_connection() have sensible
    defaults that subclasses may override.
    """

    # Provider metadata (override in subclasses)
    NAME = "base"
    REQUIRES_API_KEY = True

    def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
        """Initialize the AI provider.

        Args:
            api_key: API key for authentication (not required for local providers)
            model: Model name to use (required - user selects from loaded models)
            base_url: Base URL for API calls (used by Ollama and custom endpoints)
        """
        self.api_key = api_key
        self.model = model  # Model must be provided by user after loading from provider
        self.base_url = base_url

    @abstractmethod
    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response from the AI model.

        Args:
            system_prompt: System instructions for the model
            user_message: User message/query to process
            max_tokens: Maximum tokens in the response

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If there's an error communicating with the provider
        """
        pass

    def test_connection(self) -> Dict[str, Any]:
        """Test the connection to the AI provider.

        Sends a simple test message to verify the provider is accessible
        and the API key is valid.

        Returns:
            Dictionary with:
                - success: bool indicating if connection succeeded
                - message: Human-readable status message
                - model: Model name being used
        """
        try:
            response = self.generate(
                system_prompt="You are a test assistant. Respond with exactly: CONNECTION_OK",
                user_message="Test connection",
                max_tokens=50  # Some providers (Gemini) need more tokens to return any content
            )
            if response:
                # "CONNECTION_OK" contains "CONNECTION", so one substring
                # check covers both the exact echo and partial echoes.
                if "CONNECTION" in response.upper():
                    return {
                        'success': True,
                        'message': 'Connection successful',
                        'model': self.model
                    }
                # Even if different response, connection worked
                return {
                    'success': True,
                    'message': 'Connected (response received)',
                    'model': self.model
                }
            return {
                'success': False,
                'message': 'No response received from provider',
                'model': self.model
            }
        except AIProviderError as e:
            return {
                'success': False,
                'message': str(e),
                'model': self.model
            }
        except Exception as e:
            return {
                'success': False,
                'message': f'Unexpected error: {str(e)}',
                'model': self.model
            }

    def list_models(self) -> List[str]:
        """List available models from the provider.

        Returns:
            List of model IDs available for use.
            Returns empty list if the provider doesn't support listing.
        """
        # Default implementation - subclasses should override
        return []

    def get_recommended_model(self) -> str:
        """Get the recommended model for this provider.

        Checks if the current model is available. If not, returns
        the first available model from the provider's model list.
        This is fully dynamic - no hardcoded fallback models.

        Returns:
            Recommended model ID, or empty string if no models available
        """
        available = self.list_models()
        if not available:
            # Can't get model list - keep current model and hope it works
            return self.model
        # Check if current model is available
        if self.model and self.model in available:
            return self.model
        # Current model not available - return first available model
        # Models are typically sorted, so first one is usually a good default
        return available[0]

    def _make_request(self, url: str, payload: dict, headers: dict,
                      timeout: int = 15) -> dict:
        """Make HTTP request to AI provider API.

        Args:
            url: API endpoint URL
            payload: JSON payload to send
            headers: HTTP headers
            timeout: Request timeout in seconds

        Returns:
            Parsed JSON response

        Raises:
            AIProviderError: If request fails
        """
        import json
        import urllib.request
        import urllib.error
        # Ensure User-Agent is set (Cloudflare blocks requests without it - error 1010)
        if 'User-Agent' not in headers:
            headers['User-Agent'] = 'ProxMenux/1.0'
        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(url, data=data, headers=headers, method='POST')
        try:
            with urllib.request.urlopen(req, timeout=timeout) as resp:
                return json.loads(resp.read().decode('utf-8'))
        except urllib.error.HTTPError as e:
            # Include the response body when possible - providers put the
            # useful error details there, not in the reason phrase.
            error_body = ""
            try:
                error_body = e.read().decode('utf-8')
            except Exception:
                pass
            raise AIProviderError(f"HTTP {e.code}: {error_body or e.reason}")
        except urllib.error.URLError as e:
            raise AIProviderError(f"Connection error: {e.reason}")
        except json.JSONDecodeError as e:
            raise AIProviderError(f"Invalid JSON response: {e}")
        except Exception as e:
            raise AIProviderError(f"Request failed: {str(e)}")
@@ -0,0 +1,181 @@
"""Google Gemini provider implementation.
Google's Gemini models offer a free tier and excellent quality/price ratio.
Models are loaded dynamically from the API - no hardcoded model names.
"""
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
class GeminiProvider(AIProvider):
    """Google Gemini provider using the Generative Language API."""

    NAME = "gemini"
    REQUIRES_API_KEY = True
    API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"

    # Patterns to exclude from model list (experimental, preview, specialized)
    EXCLUDED_PATTERNS = [
        'preview', 'exp', 'experimental', 'computer-use',
        'deep-research', 'image', 'embedding', 'aqa', 'tts',
        'learnlm', 'imagen', 'veo'
    ]

    # Deprecated models that may still appear in API but return 404
    DEPRECATED_MODELS = [
        'gemini-2.0-flash',
        'gemini-1.0-pro',
        'gemini-pro',
    ]

    def list_models(self) -> List[str]:
        """List available Gemini models that support generateContent.

        Filters to only stable text generation models, excluding:
        - Preview/experimental models
        - Image generation models
        - Embedding models
        - Specialized models (computer-use, deep-research, etc.)

        Returns:
            List of model IDs available for text generation.
        """
        if not self.api_key:
            return []
        try:
            # NOTE(review): the API key travels in the query string here; Google
            # also accepts an 'x-goog-api-key' header, which keeps keys out of
            # URLs/proxy logs - consider switching. Verify before changing.
            url = f"{self.API_BASE}?key={self.api_key}"
            req = urllib.request.Request(url, method='GET', headers={'User-Agent': 'ProxMenux/1.0'})
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = json.loads(resp.read().decode('utf-8'))
            models = []
            for model in data.get('models', []):
                model_name = model.get('name', '')
                # Extract just the model ID (e.g., "models/gemini-pro" -> "gemini-pro")
                if model_name.startswith('models/'):
                    model_id = model_name[7:]
                else:
                    model_id = model_name
                # Only include models that support generateContent
                supported_methods = model.get('supportedGenerationMethods', [])
                if 'generateContent' not in supported_methods:
                    continue
                # Exclude experimental, preview, and specialized models
                model_lower = model_id.lower()
                if any(pattern in model_lower for pattern in self.EXCLUDED_PATTERNS):
                    continue
                # Exclude deprecated models that return 404
                if model_id in self.DEPRECATED_MODELS:
                    continue
                models.append(model_id)

            # Sort with recommended models first (flash-lite, flash, pro)
            def sort_key(m):
                m_lower = m.lower()
                if 'flash-lite' in m_lower:
                    return (0, m)  # Best for notifications (fast, cheap)
                if 'flash' in m_lower:
                    return (1, m)
                if 'pro' in m_lower:
                    return (2, m)
                return (3, m)

            return sorted(models, key=sort_key)
        except Exception as e:
            # Listing failures are non-fatal: log and report "no models".
            print(f"[GeminiProvider] Failed to list models: {e}")
            return []

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Google's Gemini API.

        Note: Gemini uses a different API format. System instructions
        go in a separate systemInstruction field.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Gemini")
        url = f"{self.API_BASE}/{self.model}:generateContent?key={self.api_key}"
        # Gemini uses a specific format with contents array
        payload = {
            'systemInstruction': {
                'parts': [{'text': system_prompt}]
            },
            'contents': [
                {
                    'role': 'user',
                    'parts': [{'text': user_message}]
                }
            ],
            'generationConfig': {
                'maxOutputTokens': max_tokens,
                'temperature': 0.3,
            }
        }
        headers = {
            'Content-Type': 'application/json',
        }
        result = self._make_request(url, payload, headers)
        try:
            # Gemini returns candidates array with content parts
            candidates = result.get('candidates', [])
            if not candidates:
                # Check for blocked content or other issues
                prompt_feedback = result.get('promptFeedback', {})
                block_reason = prompt_feedback.get('blockReason', '')
                if block_reason:
                    raise AIProviderError(f"Content blocked by Gemini: {block_reason}")
                raise AIProviderError("No candidates in response - model may be overloaded")
            # Check if response was blocked
            finish_reason = candidates[0].get('finishReason', '')
            if finish_reason == 'SAFETY':
                safety_ratings = candidates[0].get('safetyRatings', [])
                blocked_categories = [r.get('category', 'UNKNOWN') for r in safety_ratings
                                      if r.get('blocked', False)]
                raise AIProviderError(f"Response blocked by safety filter: {blocked_categories}")
            content = candidates[0].get('content', {})
            parts = content.get('parts', [])
            if parts:
                text = parts[0].get('text', '').strip()
                if text:
                    return text
            # No text content - check if it's a known issue
            if finish_reason == 'MAX_TOKENS':
                # MAX_TOKENS with no content could mean prompt too long OR model overload
                raise AIProviderError("No response generated (MAX_TOKENS). Model may be overloaded - try again.")
            elif finish_reason == 'STOP':
                # Normal stop but no content - unusual
                raise AIProviderError("Model returned empty response")
            else:
                raise AIProviderError(f"No response from model (reason: {finish_reason}). Try again later.")
        except AIProviderError:
            # Re-raise our own errors untouched so the messages above survive.
            raise
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")
@@ -0,0 +1,116 @@
"""Groq AI provider implementation.
Groq provides fast inference with a generous free tier (30 requests/minute).
Uses the OpenAI-compatible API format.
"""
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
class GroqProvider(AIProvider):
    """Groq AI provider using their OpenAI-compatible API."""

    NAME = "groq"
    REQUIRES_API_KEY = True
    API_URL = "https://api.groq.com/openai/v1/chat/completions"
    MODELS_URL = "https://api.groq.com/openai/v1/models"

    # Exclude non-chat models
    EXCLUDED_PATTERNS = ['whisper', 'tts', 'guard', 'tool-use']
    # Recommended models (in priority order - versatile/large models first)
    RECOMMENDED_PREFIXES = ['llama-3.3', 'llama-3.1-70b', 'llama-3.1-8b', 'mixtral', 'gemma']

    def list_models(self) -> List[str]:
        """Return chat-capable Groq model IDs, recommended models first.

        Non-chat models (whisper, tts, guard, tool-use) are filtered out.
        An empty list is returned when no API key is set or the request
        fails.
        """
        if not self.api_key:
            return []
        try:
            request = urllib.request.Request(
                self.MODELS_URL,
                headers={
                    'Authorization': f'Bearer {self.api_key}',
                    'User-Agent': 'ProxMenux/1.0'  # Cloudflare blocks requests without User-Agent
                },
                method='GET'
            )
            with urllib.request.urlopen(request, timeout=10) as response:
                listing = json.loads(response.read().decode('utf-8'))

            chat_models = []
            for entry in listing.get('data', []):
                candidate = entry.get('id', '')
                if not candidate:
                    continue
                lowered = candidate.lower()
                # Skip anything matching a non-chat pattern.
                if any(pattern in lowered for pattern in self.EXCLUDED_PATTERNS):
                    continue
                chat_models.append(candidate)

            # Rank recommended prefixes ahead of everything else,
            # alphabetical within each rank.
            def priority(model_id):
                lowered = model_id.lower()
                for rank, prefix in enumerate(self.RECOMMENDED_PREFIXES):
                    if lowered.startswith(prefix):
                        return (rank, model_id)
                return (len(self.RECOMMENDED_PREFIXES), model_id)

            return sorted(chat_models, key=priority)
        except Exception as e:
            print(f"[GroqProvider] Failed to list models: {e}")
            return []

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a chat completion via Groq's API.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Groq")
        body = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        request_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }
        reply = self._make_request(self.API_URL, body, request_headers)
        try:
            return reply['choices'][0]['message']['content'].strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")
@@ -0,0 +1,149 @@
"""Ollama provider implementation.
Ollama enables 100% local AI execution with no costs and complete privacy.
No internet connection required - perfect for sensitive enterprise environments.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OllamaProvider(AIProvider):
    """Ollama provider for local AI execution."""

    NAME = "ollama"
    REQUIRES_API_KEY = False
    DEFAULT_URL = "http://localhost:11434"

    def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
        """Initialize Ollama provider.

        Args:
            api_key: Not used for Ollama (local execution)
            model: Model name (user must select from loaded models)
            base_url: Ollama server URL (default: http://localhost:11434)
        """
        super().__init__(api_key, model, base_url)
        # Use default URL if not provided
        if not self.base_url:
            self.base_url = self.DEFAULT_URL

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using local Ollama server.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length (maps to num_predict)

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If Ollama server is unreachable
        """
        url = f"{self.base_url.rstrip('/')}/api/chat"
        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'stream': False,  # request a single JSON body, not a chunk stream
            'options': {
                'num_predict': max_tokens,
                'temperature': 0.3,
            }
        }
        headers = {
            'Content-Type': 'application/json',
        }
        # Cloud models (e.g., kimi-k2.5:cloud, minimax-m2.7:cloud) need longer timeout
        # because requests go through: ProxMenux -> Ollama -> Cloud Provider -> back
        # Local models also need generous timeout for slower hardware (e.g., low-end CPUs,
        # no GPU acceleration, larger models like 8B parameters)
        is_cloud_model = ':cloud' in self.model.lower()
        timeout = 120 if is_cloud_model else 90  # 2 minutes for cloud, 90s for local
        try:
            result = self._make_request(url, payload, headers, timeout=timeout)
        except AIProviderError as e:
            # Translate low-level connection failures into an actionable hint.
            if "Connection" in str(e) or "refused" in str(e).lower():
                raise AIProviderError(
                    f"Cannot connect to Ollama at {self.base_url}. "
                    "Make sure Ollama is running (ollama serve)"
                )
            raise
        try:
            message = result.get('message', {})
            return message.get('content', '').strip()
        except (KeyError, AttributeError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")

    def test_connection(self):
        """Test connection to Ollama server.

        Also checks if the specified model is available. For ":cloud"
        models only the availability check is performed (no generation).
        """
        import json
        import urllib.request
        import urllib.error
        # First check if server is running
        try:
            url = f"{self.base_url.rstrip('/')}/api/tags"
            req = urllib.request.Request(url, method='GET', headers={'User-Agent': 'ProxMenux/1.0'})
            with urllib.request.urlopen(req, timeout=5) as resp:
                data = json.loads(resp.read().decode('utf-8'))
            # Get full model names (with tags) for comparison
            full_model_names = [m.get('name', '') for m in data.get('models', [])]
            # Also get base names (without tags) for fallback matching
            base_model_names = [name.split(':')[0] for name in full_model_names]
            # Check if the requested model matches any available model
            # Match by: exact name, base name, or requested model without tag
            requested_base = self.model.split(':')[0] if ':' in self.model else self.model
            model_found = (
                self.model in full_model_names or  # Exact match (e.g., "llama3.2:latest")
                self.model in base_model_names or  # Base name match (e.g., "llama3.2")
                requested_base in base_model_names  # Requested base matches available base
            )
            if not model_found:
                # Show at most 5 available models in the error message.
                display_models = full_model_names[:5] if full_model_names else ['none']
                return {
                    'success': False,
                    'message': f"Model '{self.model}' not found. Available: {', '.join(display_models)}{'...' if len(full_model_names) > 5 else ''}",
                    'model': self.model
                }
        except urllib.error.URLError:
            return {
                'success': False,
                'message': f"Cannot connect to Ollama at {self.base_url}. Make sure Ollama is running.",
                'model': self.model
            }
        except Exception as e:
            return {
                'success': False,
                'message': f"Error checking Ollama: {str(e)}",
                'model': self.model
            }
        # If server is up and model exists, do the actual test
        # For cloud models, we skip the full test (which sends a message)
        # because it would take too long. The model availability check above is sufficient.
        is_cloud_model = ':cloud' in self.model.lower()
        if is_cloud_model:
            return {
                'success': True,
                'message': f"Cloud model '{self.model}' is available via Ollama",
                'model': self.model
            }
        return super().test_connection()
@@ -0,0 +1,158 @@
"""OpenAI provider implementation.
OpenAI is the industry standard for AI APIs.
Models are loaded dynamically from the API.
"""
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
class OpenAIProvider(AIProvider):
    """OpenAI provider using their Chat Completions API.

    Also compatible with OpenAI-compatible APIs like:
    - BytePlus/ByteDance (Kimi K2.5)
    - LocalAI
    - LM Studio
    - vLLM
    - Together AI
    - Any OpenAI-compatible endpoint
    """

    NAME = "openai"
    REQUIRES_API_KEY = True
    DEFAULT_API_URL = "https://api.openai.com/v1/chat/completions"
    DEFAULT_MODELS_URL = "https://api.openai.com/v1/models"

    # Models to exclude (not suitable for chat/text generation)
    EXCLUDED_PATTERNS = [
        'embedding', 'whisper', 'tts', 'dall-e', 'image',
        'instruct', 'realtime', 'audio', 'moderation',
        'search', 'code-search', 'text-similarity', 'babbage', 'davinci',
        'curie', 'ada', 'transcribe'
    ]

    # Recommended models for chat (in priority order)
    RECOMMENDED_PREFIXES = ['gpt-4o-mini', 'gpt-4o', 'gpt-4-turbo', 'gpt-4', 'gpt-3.5-turbo']

    def list_models(self) -> List[str]:
        """List available OpenAI models for chat completions.

        Filters to only chat-capable models, excluding:
        - Embedding models
        - Audio/speech models (whisper, tts)
        - Image models (dall-e)
        - Instruct models (different API)
        - Legacy models (babbage, davinci, etc.)

        Returns:
            List of model IDs suitable for chat completions.
        """
        if not self.api_key:
            return []
        try:
            # Determine models URL from base_url if set
            if self.base_url:
                base = self.base_url.rstrip('/')
                if not base.endswith('/v1'):
                    base = f"{base}/v1"
                models_url = f"{base}/models"
            else:
                models_url = self.DEFAULT_MODELS_URL
            req = urllib.request.Request(
                models_url,
                headers={
                    'Authorization': f'Bearer {self.api_key}',
                    # Consistent with GroqProvider/_make_request: some
                    # OpenAI-compatible endpoints sit behind Cloudflare,
                    # which blocks requests without a User-Agent
                    'User-Agent': 'ProxMenux/1.0',
                },
                method='GET'
            )
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = json.loads(resp.read().decode('utf-8'))
            models = []
            for model in data.get('data', []):
                model_id = model.get('id', '')
                if not model_id:
                    continue
                model_lower = model_id.lower()
                # Must be a GPT model
                if 'gpt' not in model_lower:
                    continue
                # Exclude non-chat models
                if any(pattern in model_lower for pattern in self.EXCLUDED_PATTERNS):
                    continue
                models.append(model_id)

            # Sort with recommended models first
            def sort_key(m):
                m_lower = m.lower()
                for i, prefix in enumerate(self.RECOMMENDED_PREFIXES):
                    if m_lower.startswith(prefix):
                        return (i, m)
                return (len(self.RECOMMENDED_PREFIXES), m)

            return sorted(models, key=sort_key)
        except Exception as e:
            # Listing failures are non-fatal: log and report "no models".
            print(f"[OpenAIProvider] Failed to list models: {e}")
            return []

    def _get_api_url(self) -> str:
        """Get the API URL, using custom base_url if provided."""
        if self.base_url:
            # Ensure the URL ends with the correct path
            base = self.base_url.rstrip('/')
            if not base.endswith('/chat/completions'):
                if not base.endswith('/v1'):
                    base = f"{base}/v1"
                base = f"{base}/chat/completions"
            return base
        return self.DEFAULT_API_URL

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using OpenAI's API or compatible endpoint.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for OpenAI")
        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }
        api_url = self._get_api_url()
        result = self._make_request(api_url, payload, headers)
        try:
            return result['choices'][0]['message']['content'].strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")
@@ -0,0 +1,123 @@
"""OpenRouter provider implementation.
OpenRouter is an aggregator that provides access to 100+ AI models
using a single API key. Maximum flexibility for choosing models.
Uses OpenAI-compatible API format.
"""
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
class OpenRouterProvider(AIProvider):
    """OpenRouter provider for multi-model access."""

    NAME = "openrouter"
    REQUIRES_API_KEY = True
    API_URL = "https://openrouter.ai/api/v1/chat/completions"
    MODELS_URL = "https://openrouter.ai/api/v1/models"

    # Exclude non-text models
    EXCLUDED_PATTERNS = ['image', 'vision', 'audio', 'video', 'embedding', 'moderation']
    # Recommended model prefixes (popular, reliable, good for notifications)
    RECOMMENDED_PREFIXES = [
        'meta-llama/llama-3', 'anthropic/claude', 'google/gemini',
        'openai/gpt', 'mistralai/mistral', 'mistralai/mixtral'
    ]

    def list_models(self) -> List[str]:
        """List available OpenRouter models for chat completions.

        OpenRouter has 300+ models. This filters to text generation models
        and prioritizes popular, reliable options.

        Returns:
            List of model IDs suitable for text generation.
        """
        if not self.api_key:
            return []
        try:
            req = urllib.request.Request(
                self.MODELS_URL,
                headers={
                    'Authorization': f'Bearer {self.api_key}',
                    # Consistent with GroqProvider/_make_request: Cloudflare
                    # blocks requests without a User-Agent (error 1010)
                    'User-Agent': 'ProxMenux/1.0',
                },
                method='GET'
            )
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = json.loads(resp.read().decode('utf-8'))
            models = []
            for model in data.get('data', []):
                model_id = model.get('id', '')
                if not model_id:
                    continue
                model_lower = model_id.lower()
                # Exclude non-text models
                if any(pattern in model_lower for pattern in self.EXCLUDED_PATTERNS):
                    continue
                models.append(model_id)

            # Sort with recommended models first
            def sort_key(m):
                m_lower = m.lower()
                for i, prefix in enumerate(self.RECOMMENDED_PREFIXES):
                    if m_lower.startswith(prefix):
                        return (i, m)
                return (len(self.RECOMMENDED_PREFIXES), m)

            return sorted(models, key=sort_key)
        except Exception as e:
            # Listing failures are non-fatal: log and report "no models".
            print(f"[OpenRouterProvider] Failed to list models: {e}")
            return []

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using OpenRouter's API.

        OpenRouter uses OpenAI-compatible format with additional
        headers for app identification.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for OpenRouter")
        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
            # App identification headers recommended by OpenRouter
            'HTTP-Referer': 'https://github.com/MacRimi/ProxMenux',
            'X-Title': 'ProxMenux Monitor',
        }
        result = self._make_request(self.API_URL, payload, headers)
        try:
            return result['choices'][0]['message']['content'].strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")
+303 -4
View File
@@ -57,7 +57,9 @@ def load_auth_config():
"configured": bool,
"totp_enabled": bool, # 2FA enabled flag
"totp_secret": str, # TOTP secret key
"backup_codes": list # List of backup codes
"backup_codes": list, # List of backup codes
"api_tokens": list, # List of stored API token metadata
"revoked_tokens": list # List of revoked token hashes
}
"""
if not AUTH_CONFIG_FILE.exists():
@@ -69,7 +71,9 @@ def load_auth_config():
"configured": False,
"totp_enabled": False,
"totp_secret": None,
"backup_codes": []
"backup_codes": [],
"api_tokens": [],
"revoked_tokens": []
}
try:
@@ -81,6 +85,8 @@ def load_auth_config():
config.setdefault("totp_enabled", False)
config.setdefault("totp_secret", None)
config.setdefault("backup_codes", [])
config.setdefault("api_tokens", [])
config.setdefault("revoked_tokens", [])
return config
except Exception as e:
print(f"Error loading auth config: {e}")
@@ -92,7 +98,9 @@ def load_auth_config():
"configured": False,
"totp_enabled": False,
"totp_secret": None,
"backup_codes": []
"backup_codes": [],
"api_tokens": [],
"revoked_tokens": []
}
@@ -141,11 +149,18 @@ def verify_token(token):
"""
Verify a JWT token
Returns username if valid, None otherwise
Also checks if the token has been revoked
"""
if not JWT_AVAILABLE or not token:
return None
try:
# Check if the token has been revoked
token_hash = hashlib.sha256(token.encode()).hexdigest()
config = load_auth_config()
if token_hash in config.get("revoked_tokens", []):
return None
payload = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
return payload.get('username')
except jwt.ExpiredSignatureError:
@@ -156,6 +171,88 @@ def verify_token(token):
return None
def store_api_token_metadata(token, token_name="API Token"):
    """
    Store API token metadata (hash, name, creation date) for listing and revocation.
    The actual token is never stored - only a hash for identification.
    """
    config = load_auth_config()
    # SHA-256 of the full token identifies it later without persisting the secret
    token_hash = hashlib.sha256(token.encode()).hexdigest()
    # First 16 hex chars of the hash serve as a short, unique-enough ID
    token_id = token_hash[:16]
    token_entry = {
        "id": token_id,
        "name": token_name,
        "token_hash": token_hash,
        "token_prefix": token[:12] + "...",  # short prefix for display only
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # datetime.now(timezone.utc) is the modern equivalent
        "created_at": datetime.utcnow().isoformat() + "Z",
        # NOTE(review): 365-day expiry is recorded here; confirm it matches the
        # actual expiration encoded in the issued JWT
        "expires_at": (datetime.utcnow() + timedelta(days=365)).isoformat() + "Z"
    }
    config.setdefault("api_tokens", [])
    config["api_tokens"].append(token_entry)
    save_auth_config(config)
    return token_entry
def list_api_tokens():
    """
    List all stored API token metadata (no actual tokens are returned).
    Returns list of token entries with id, name, prefix, creation and expiration dates.
    """
    config = load_auth_config()
    revoked_hashes = set(config.get("revoked_tokens", []))
    # Project each stored entry down to display-safe fields only.
    return [
        {
            "id": stored.get("id"),
            "name": stored.get("name", "API Token"),
            "token_prefix": stored.get("token_prefix", "***"),
            "created_at": stored.get("created_at"),
            "expires_at": stored.get("expires_at"),
            "revoked": stored.get("token_hash") in revoked_hashes,
        }
        for stored in config.get("api_tokens", [])
    ]
def revoke_api_token(token_id):
    """
    Revoke an API token by its ID.
    Adds the token hash to the revoked list so it fails verification.
    Returns (success: bool, message: str)
    """
    config = load_auth_config()
    tokens = config.get("api_tokens", [])
    target = next((entry for entry in tokens if entry.get("id") == token_id), None)
    if target is None:
        return False, "Token not found"
    token_hash = target.get("token_hash")
    revoked = config.setdefault("revoked_tokens", [])
    if token_hash in revoked:
        return False, "Token is already revoked"
    revoked.append(token_hash)
    # Remove the revoked token from the active list as well.
    config["api_tokens"] = [entry for entry in tokens if entry.get("id") != token_id]
    if save_auth_config(config):
        return True, "Token revoked successfully"
    return False, "Failed to save configuration"
def get_auth_status():
"""
Get current authentication status
@@ -243,6 +340,8 @@ def disable_auth():
config["totp_enabled"] = False
config["totp_secret"] = None
config["backup_codes"] = []
config["api_tokens"] = []
config["revoked_tokens"] = []
if save_auth_config(config):
return True, "Authentication disabled"
@@ -472,6 +571,203 @@ def disable_totp(username, password):
return False, "Failed to disable 2FA"
# -------------------------------------------------------------------
# SSL/HTTPS Certificate Management
# -------------------------------------------------------------------
# Location of the SSL settings file; overridable via the
# PROXMENUX_SSL_CONFIG environment variable.
SSL_CONFIG_FILE = Path(os.environ.get("PROXMENUX_SSL_CONFIG", "/etc/proxmenux/ssl_config.json"))
# Default Proxmox certificate paths
PROXMOX_CERT_PATH = "/etc/pve/local/pve-ssl.pem"
PROXMOX_KEY_PATH = "/etc/pve/local/pve-ssl.key"
def load_ssl_config():
    """Load SSL configuration from file.

    Missing file or unreadable JSON yields the disabled defaults; loaded
    configs are backfilled with any missing keys.
    """
    defaults = {
        "enabled": False,
        "cert_path": "",
        "key_path": "",
        "source": "none"  # "none", "proxmox", "custom"
    }
    if not SSL_CONFIG_FILE.exists():
        return dict(defaults)
    try:
        with open(SSL_CONFIG_FILE, 'r') as f:
            config = json.load(f)
        for key, value in defaults.items():
            config.setdefault(key, value)
        return config
    except Exception:
        return dict(defaults)
def save_ssl_config(config):
    """Persist SSL configuration to SSL_CONFIG_FILE as pretty-printed JSON.

    Creates the parent directory if needed. Returns True on success,
    False when the file cannot be written.
    """
    try:
        SSL_CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
        SSL_CONFIG_FILE.write_text(json.dumps(config, indent=2))
        return True
    except Exception as e:
        print(f"Error saving SSL config: {e}")
        return False
def detect_proxmox_certificates():
    """
    Detect available Proxmox certificates.
    Returns dict with detection results:
        - proxmox_available: True when both cert and key files exist
        - proxmox_cert / proxmox_key: the default Proxmox file paths
        - cert_info: parsed subject/expiry/issuer info, or None
    """
    result = {
        "proxmox_available": False,
        "proxmox_cert": PROXMOX_CERT_PATH,
        "proxmox_key": PROXMOX_KEY_PATH,
        "cert_info": None
    }
    if os.path.isfile(PROXMOX_CERT_PATH) and os.path.isfile(PROXMOX_KEY_PATH):
        result["proxmox_available"] = True
        # Try to get certificate info (best-effort: failures leave cert_info None)
        try:
            import subprocess
            cert_output = subprocess.run(
                ["openssl", "x509", "-in", PROXMOX_CERT_PATH, "-noout", "-subject", "-enddate", "-issuer"],
                capture_output=True, text=True, timeout=5
            )
            if cert_output.returncode == 0:
                # openssl prints one "key=value" line per requested field
                lines = cert_output.stdout.strip().split('\n')
                info = {}
                for line in lines:
                    if line.startswith("subject="):
                        info["subject"] = line.replace("subject=", "").strip()
                    elif line.startswith("notAfter="):
                        info["expires"] = line.replace("notAfter=", "").strip()
                    elif line.startswith("issuer="):
                        issuer = line.replace("issuer=", "").strip()
                        info["issuer"] = issuer
                        # Self-signed certs have identical subject and issuer.
                        # NOTE(review): relies on "subject=" being printed before
                        # "issuer=" in openssl's output - matches the argument order above
                        info["is_self_signed"] = info.get("subject", "") == issuer
                result["cert_info"] = info
        except Exception:
            pass
    return result
def validate_certificate_files(cert_path, key_path):
    """Validate that a PEM certificate/key pair exists and looks sane.

    Checks, in order: both paths given, both files exist, both readable,
    PEM header markers present, and (best effort, RSA only — the match
    check uses `openssl rsa` and is skipped when openssl is missing or
    fails) that the certificate and key moduli agree.

    Returns:
        (valid: bool, message: str)
    """
    if not cert_path or not key_path:
        return False, "Certificate and key paths are required"
    for path, label in ((cert_path, "Certificate"), (key_path, "Key")):
        if not os.path.isfile(path):
            return False, f"{label} file not found: {path}"

    # Cheap sanity check on the first 100 bytes: look for PEM markers.
    try:
        with open(cert_path, 'r') as handle:
            head = handle.read(100)
        if "BEGIN CERTIFICATE" not in head and "BEGIN TRUSTED CERTIFICATE" not in head:
            return False, "Certificate file does not appear to be a valid PEM certificate"
        with open(key_path, 'r') as handle:
            head = handle.read(100)
        if not ("BEGIN" in head and "KEY" in head):
            return False, "Key file does not appear to be a valid PEM key"
    except PermissionError:
        return False, "Cannot read certificate files. Check file permissions."
    except Exception as e:
        return False, f"Error reading certificate files: {str(e)}"

    # Best-effort pair check: compare RSA moduli. Non-RSA keys or a
    # missing openssl binary make this a no-op rather than a failure.
    try:
        import subprocess
        outputs = [
            subprocess.run(args, capture_output=True, text=True, timeout=5)
            for args in (
                ["openssl", "x509", "-noout", "-modulus", "-in", cert_path],
                ["openssl", "rsa", "-noout", "-modulus", "-in", key_path],
            )
        ]
        if all(proc.returncode == 0 for proc in outputs):
            if outputs[0].stdout.strip() != outputs[1].stdout.strip():
                return False, "Certificate and key do not match"
    except Exception:
        pass  # Non-critical, proceed anyway

    return True, "Certificate files are valid"
def configure_ssl(cert_path, key_path, source="custom"):
    """Validate the given PEM pair and persist it as the active SSL config.

    Args:
        cert_path / key_path: paths to the certificate and private key.
        source: origin tag stored in the config ("custom" or "proxmox").

    Returns:
        (success: bool, message: str) — validation errors are relayed
        verbatim from validate_certificate_files().
    """
    ok, detail = validate_certificate_files(cert_path, key_path)
    if not ok:
        return False, detail
    new_config = {
        "enabled": True,
        "cert_path": cert_path,
        "key_path": key_path,
        "source": source,
    }
    if not save_ssl_config(new_config):
        return False, "Failed to save SSL configuration"
    return True, "SSL configured successfully. Restart the monitor service to apply changes."
def disable_ssl():
    """Disable SSL by writing a cleared config; the service then serves HTTP.

    Returns:
        (success: bool, message: str)
    """
    cleared = {"enabled": False, "cert_path": "", "key_path": "", "source": "none"}
    if not save_ssl_config(cleared):
        return False, "Failed to save SSL configuration"
    return True, "SSL disabled. Restart the monitor service to apply changes."
def get_ssl_context():
    """Return the (cert_path, key_path) tuple for Flask's ssl_context.

    Returns None when SSL is disabled or either configured file is
    missing on disk, in which case the caller should serve plain HTTP.
    """
    config = load_ssl_config()
    if not config.get("enabled"):
        return None
    paths = (config.get("cert_path", ""), config.get("key_path", ""))
    if all(p and os.path.isfile(p) for p in paths):
        return paths
    return None
def authenticate(username, password, totp_token=None):
"""
Authenticate a user with username, password, and optional TOTP
@@ -490,12 +786,15 @@ def authenticate(username, password, totp_token=None):
if config.get("totp_enabled"):
if not totp_token:
# First step: password OK, now request TOTP code (not a failure)
return False, None, True, "2FA code required"
# Verify TOTP token or backup code
success, message = verify_totp(username, totp_token, use_backup=len(totp_token) == 9) # Backup codes are formatted XXXX-XXXX
if not success:
return False, None, True, message
# TOTP code is wrong: return requires_totp=False so the caller
# logs it as a real authentication failure for Fail2Ban
return False, None, False, "Invalid 2FA code"
token = generate_token(username)
if token:
+42
View File
@@ -89,6 +89,41 @@ cp "$SCRIPT_DIR/flask_terminal_routes.py" "$APP_DIR/usr/bin/" 2>/dev/null || ech
cp "$SCRIPT_DIR/hardware_monitor.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ hardware_monitor.py not found"
cp "$SCRIPT_DIR/proxmox_storage_monitor.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ proxmox_storage_monitor.py not found"
cp "$SCRIPT_DIR/flask_script_runner.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ flask_script_runner.py not found"
cp "$SCRIPT_DIR/security_manager.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ security_manager.py not found"
cp "$SCRIPT_DIR/flask_security_routes.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ flask_security_routes.py not found"
cp "$SCRIPT_DIR/notification_manager.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ notification_manager.py not found"
cp "$SCRIPT_DIR/notification_channels.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ notification_channels.py not found"
cp "$SCRIPT_DIR/notification_templates.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ notification_templates.py not found"
cp "$SCRIPT_DIR/notification_events.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ notification_events.py not found"
cp "$SCRIPT_DIR/proxmox_known_errors.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ proxmox_known_errors.py not found"
cp "$SCRIPT_DIR/ai_context_enrichment.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ ai_context_enrichment.py not found"
cp "$SCRIPT_DIR/startup_grace.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ startup_grace.py not found"
cp "$SCRIPT_DIR/flask_notification_routes.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ flask_notification_routes.py not found"
cp "$SCRIPT_DIR/oci_manager.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ oci_manager.py not found"
cp "$SCRIPT_DIR/flask_oci_routes.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ flask_oci_routes.py not found"
cp "$SCRIPT_DIR/oci/description_templates.py" "$APP_DIR/usr/bin/" 2>/dev/null || echo "⚠️ description_templates.py not found"
# Copy AI providers module for notification enhancement
echo "📋 Copying AI providers module..."
if [ -d "$SCRIPT_DIR/ai_providers" ]; then
mkdir -p "$APP_DIR/usr/bin/ai_providers"
cp "$SCRIPT_DIR/ai_providers/"*.py "$APP_DIR/usr/bin/ai_providers/"
echo "✅ AI providers module copied"
else
echo "⚠️ ai_providers directory not found"
fi
# Copy config files (verified AI models, prompts, etc.)
echo "📋 Copying config files..."
CONFIG_DIR="$APPIMAGE_ROOT/config"
if [ -d "$CONFIG_DIR" ]; then
mkdir -p "$APP_DIR/usr/bin/config"
cp "$CONFIG_DIR/"*.json "$APP_DIR/usr/bin/config/" 2>/dev/null || true
cp "$CONFIG_DIR/"*.txt "$APP_DIR/usr/bin/config/" 2>/dev/null || true
echo "✅ Config files copied"
else
echo "⚠️ config directory not found"
fi
echo "📋 Adding translation support..."
cat > "$APP_DIR/usr/bin/translate_cli.py" << 'PYEOF'
@@ -293,6 +328,7 @@ pip3 install --target "$APP_DIR/usr/lib/python3/dist-packages" \
h11==0.9.0 || true
# Phase 2: Install modern Flask/WebSocket dependencies (will upgrade h11 and related packages)
# Note: cryptography removed due to Python version compatibility issues (PyO3 modules)
pip3 install --target "$APP_DIR/usr/lib/python3/dist-packages" --upgrade --no-deps \
flask \
flask-cors \
@@ -310,6 +346,12 @@ pip3 install --target "$APP_DIR/usr/lib/python3/dist-packages" --upgrade \
simple-websocket>=0.10.0 \
flask-sock>=0.6.0
# Phase 3b: Install gevent for SSL+WebSocket support (WSS)
pip3 install --target "$APP_DIR/usr/lib/python3/dist-packages" --upgrade \
gevent>=24.2.1 \
gevent-websocket>=0.10.1 \
greenlet>=3.0.0
cat > "$APP_DIR/usr/lib/python3/dist-packages/cgi.py" << 'PYEOF'
from typing import Tuple, Dict
try:
+222 -11
View File
@@ -3,11 +3,48 @@ Flask Authentication Routes
Provides REST API endpoints for authentication management
"""
import logging
import logging.handlers
import os
import subprocess
import threading
import time
from flask import Blueprint, jsonify, request
import auth_manager
import jwt
import datetime
# Dedicated logger for auth failures (Fail2Ban reads this file)
# WARNING level: only failures are logged here, never successful logins.
auth_logger = logging.getLogger("proxmenux-auth")
auth_logger.setLevel(logging.WARNING)

# Handler 1: File for Fail2Ban
# Format "<timestamp> proxmenux-auth: <msg>" is what the Fail2Ban filter
# is expected to match — change it in lockstep with the jail config.
_auth_file_handler = logging.FileHandler("/var/log/proxmenux-auth.log")
_auth_file_handler.setFormatter(logging.Formatter("%(asctime)s proxmenux-auth: %(message)s"))
auth_logger.addHandler(_auth_file_handler)

# Handler 2: Syslog for JournalWatcher notifications
# This sends to the systemd journal so notification_events.py can detect auth failures
try:
    _auth_syslog_handler = logging.handlers.SysLogHandler(address='/dev/log', facility=logging.handlers.SysLogHandler.LOG_AUTH)
    _auth_syslog_handler.setFormatter(logging.Formatter("proxmenux-auth: %(message)s"))
    _auth_syslog_handler.ident = "proxmenux-auth"
    auth_logger.addHandler(_auth_syslog_handler)
except Exception:
    pass  # Syslog may not be available in all environments
def _get_client_ip():
    """Get the real client IP, supporting reverse proxies (X-Forwarded-For, X-Real-IP)"""
    xff = request.headers.get("X-Forwarded-For", "")
    if xff:
        # Left-most entry in the chain is the originating client.
        return xff.split(",")[0].strip()
    x_real_ip = request.headers.get("X-Real-IP", "")
    if x_real_ip:
        return x_real_ip.strip()
    # No proxy headers: fall back to the socket peer address.
    return request.remote_addr or "unknown"
auth_bp = Blueprint('auth', __name__)
@auth_bp.route('/api/auth/status', methods=['GET'])
@@ -24,27 +61,132 @@ def auth_status():
return jsonify(status)
except Exception as e:
return jsonify({"error": str(e)}), 500
return jsonify({"success": False, "message": str(e)}), 500
@auth_bp.route('/api/auth/setup', methods=['POST'])
def auth_setup():
"""Set up authentication with username and password"""
# -------------------------------------------------------------------
# SSL/HTTPS Certificate Management
# -------------------------------------------------------------------
@auth_bp.route('/api/ssl/status', methods=['GET'])
def ssl_status():
"""Get current SSL configuration status and detect available certificates"""
try:
data = request.json
username = data.get('username')
password = data.get('password')
config = auth_manager.load_ssl_config()
detection = auth_manager.detect_proxmox_certificates()
success, message = auth_manager.setup_auth(username, password)
return jsonify({
"success": True,
"ssl_enabled": config.get("enabled", False),
"source": config.get("source", "none"),
"cert_path": config.get("cert_path", ""),
"key_path": config.get("key_path", ""),
"proxmox_available": detection.get("proxmox_available", False),
"proxmox_cert": detection.get("proxmox_cert", ""),
"proxmox_key": detection.get("proxmox_key", ""),
"cert_info": detection.get("cert_info")
})
except Exception as e:
return jsonify({"success": False, "message": str(e)}), 500
def _schedule_service_restart(delay=1.5):
    """Schedule a restart of the monitor service via systemctl after a short delay.

    The delay gives the in-flight HTTP response time to reach the client
    before the process goes away.

    Args:
        delay: seconds to sleep in the background thread before restarting.
    """
    def _do_restart():
        time.sleep(delay)
        print("[ProxMenux] Restarting monitor service to apply SSL changes...")
        # Use systemctl restart which properly stops and starts the service.
        # This works because systemd manages proxmenux-monitor.service.
        try:
            subprocess.Popen(
                ["systemctl", "restart", "proxmenux-monitor"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
        except Exception as e:
            print(f"[ProxMenux] Failed to restart via systemctl: {e}")
            # Fallback: terminate ourselves. Note this only *stops* the
            # process — it comes back only if the unit sets Restart=
            # (TODO confirm against the service file).
            import signal
            os.kill(os.getpid(), signal.SIGTERM)  # fix: was hard-coded 15

    # Daemon thread so a pending restart never blocks interpreter exit.
    t = threading.Thread(target=_do_restart, daemon=True)
    t.start()
@auth_bp.route('/api/ssl/configure', methods=['POST'])
def ssl_configure():
"""Configure SSL with Proxmox or custom certificates"""
try:
data = request.json or {}
source = data.get("source", "proxmox")
auto_restart = data.get("auto_restart", True)
if source == "proxmox":
cert_path = auth_manager.PROXMOX_CERT_PATH
key_path = auth_manager.PROXMOX_KEY_PATH
elif source == "custom":
cert_path = data.get("cert_path", "")
key_path = data.get("key_path", "")
else:
return jsonify({"success": False, "message": "Invalid source. Use 'proxmox' or 'custom'."}), 400
success, message = auth_manager.configure_ssl(cert_path, key_path, source)
if success:
return jsonify({"success": True, "message": message})
if auto_restart:
_schedule_service_restart()
return jsonify({
"success": True,
"message": "SSL enabled. The service is restarting...",
"restarting": auto_restart,
"new_protocol": "https"
})
else:
return jsonify({"success": False, "message": message}), 400
except Exception as e:
return jsonify({"success": False, "message": str(e)}), 500
@auth_bp.route('/api/ssl/disable', methods=['POST'])
def ssl_disable():
    """Disable SSL and return to HTTP"""
    # Body (optional JSON): {"auto_restart": bool} — defaults to True.
    try:
        data = request.json or {}
        auto_restart = data.get("auto_restart", True)
        success, message = auth_manager.disable_ssl()
        if success:
            if auto_restart:
                # Fire-and-forget background restart so this response
                # can still reach the client.
                _schedule_service_restart()
            # NOTE(review): the message claims the service is restarting
            # even when auto_restart is False — confirm that is intended.
            return jsonify({
                "success": True,
                "message": "SSL disabled. The service is restarting...",
                "restarting": auto_restart,
                "new_protocol": "http"
            })
        else:
            # Config could not be saved; relay the manager's message.
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@auth_bp.route('/api/ssl/validate', methods=['POST'])
def ssl_validate():
    """Validate a custom certificate/key path pair without enabling SSL.

    Body (JSON): {"cert_path": str, "key_path": str}. Responds with
    {"success": bool, "message": str} from the validation routine.
    """
    try:
        payload = request.json or {}
        valid, message = auth_manager.validate_certificate_files(
            payload.get("cert_path", ""),
            payload.get("key_path", ""),
        )
        return jsonify({"success": valid, "message": message})
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@auth_bp.route('/api/auth/decline', methods=['POST'])
def auth_decline():
"""Decline authentication setup"""
@@ -73,16 +215,50 @@ def auth_login():
if success:
return jsonify({"success": True, "token": token, "message": message})
elif requires_totp:
# First step: password OK, requesting TOTP code (not a failure)
return jsonify({"success": False, "requires_totp": True, "message": message}), 200
else:
return jsonify({"success": False, "message": message}), 401
# Authentication failure (wrong password or wrong TOTP code)
client_ip = _get_client_ip()
auth_logger.warning(
"authentication failure; rhost=%s user=%s",
client_ip, username or "unknown"
)
# If user submitted a TOTP token that was wrong, tell frontend
# to keep showing the TOTP field (not go back to password step)
is_totp_failure = totp_token and "2FA" in message
return jsonify({
"success": False,
"message": message,
"requires_totp": is_totp_failure
}), 401
except Exception as e:
return jsonify({"success": False, "message": str(e)}), 500
@auth_bp.route('/api/auth/setup', methods=['POST'])
def auth_setup():
    """Set up authentication with username and password (create user + enable auth)"""
    # Body (JSON): {"username": str, "password": str}.
    # On success returns a fresh session token so the client is logged
    # in immediately without a second /login round-trip.
    try:
        data = request.json
        username = data.get('username')
        password = data.get('password')
        success, message = auth_manager.setup_auth(username, password)
        if success:
            # Generate a token so the user is logged in immediately
            token = auth_manager.generate_token(username)
            return jsonify({"success": True, "token": token, "message": message})
        else:
            # NOTE(review): failure payload uses "error" here while other
            # routes use "message" — confirm the frontend expects this key.
            return jsonify({"success": False, "error": message}), 400
    except Exception as e:
        return jsonify({"success": False, "error": str(e)}), 500
@auth_bp.route('/api/auth/enable', methods=['POST'])
def auth_enable():
"""Enable authentication"""
"""Enable authentication (must already be configured)"""
try:
success, message = auth_manager.enable_auth()
@@ -262,6 +438,9 @@ def generate_api_token():
'iat': datetime.datetime.utcnow()
}, auth_manager.JWT_SECRET, algorithm='HS256')
# Store token metadata for listing and revocation
auth_manager.store_api_token_metadata(api_token, token_name)
return jsonify({
"success": True,
"token": api_token,
@@ -276,3 +455,35 @@ def generate_api_token():
except Exception as e:
print(f"[ERROR] generate_api_token: {str(e)}") # Log error for debugging
return jsonify({"success": False, "message": f"Internal error: {str(e)}"}), 500
@auth_bp.route('/api/auth/api-tokens', methods=['GET'])
def list_api_tokens():
    """List all generated API tokens (metadata only, no actual token values)"""
    # Requires a valid session JWT in the Authorization: Bearer header.
    try:
        token = request.headers.get('Authorization', '').replace('Bearer ', '')
        if not token or not auth_manager.verify_token(token):
            return jsonify({"success": False, "message": "Unauthorized"}), 401
        tokens = auth_manager.list_api_tokens()
        return jsonify({"success": True, "tokens": tokens})
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@auth_bp.route('/api/auth/api-tokens/<token_id>', methods=['DELETE'])
def revoke_api_token_route(token_id):
    """Revoke an API token by its ID"""
    # Requires a valid session JWT in the Authorization: Bearer header.
    # token_id is the stored metadata identifier, not the token value.
    try:
        token = request.headers.get('Authorization', '').replace('Bearer ', '')
        if not token or not auth_manager.verify_token(token):
            return jsonify({"success": False, "message": "Unauthorized"}), 401
        success, message = auth_manager.revoke_api_token(token_id)
        if success:
            return jsonify({"success": True, "message": message})
        else:
            # Unknown or already-revoked token id.
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
+529 -3
View File
@@ -51,15 +51,74 @@ def get_system_info():
@health_bp.route('/api/health/acknowledge', methods=['POST'])
def acknowledge_error():
"""Acknowledge an error manually (user dismissed it)"""
"""
Acknowledge/dismiss an error manually.
Returns details about the acknowledged error including original severity
and suppression period info.
"""
try:
data = request.get_json()
if not data or 'error_key' not in data:
return jsonify({'error': 'error_key is required'}), 400
error_key = data['error_key']
health_persistence.acknowledge_error(error_key)
return jsonify({'success': True, 'message': 'Error acknowledged'})
result = health_persistence.acknowledge_error(error_key)
if result.get('success'):
# Invalidate cached health results so next fetch reflects the dismiss
# Use the error's category to clear the correct cache
category = result.get('category', '')
cache_key_map = {
'logs': 'logs_analysis',
'pve_services': 'pve_services',
'updates': 'updates_check',
'security': 'security_check',
'temperature': 'cpu_check',
'network': 'network_check',
'disks': 'storage_check',
'vms': 'vms_check',
}
cache_key = cache_key_map.get(category)
if cache_key:
health_monitor.last_check_times.pop(cache_key, None)
health_monitor.cached_results.pop(cache_key, None)
# Also invalidate ALL background/overall caches so next fetch reflects dismiss
for ck in ['_bg_overall', '_bg_detailed', 'overall_health']:
health_monitor.last_check_times.pop(ck, None)
health_monitor.cached_results.pop(ck, None)
# Use the per-record suppression hours from acknowledge_error()
sup_hours = result.get('suppression_hours', 24)
if sup_hours == -1:
suppression_label = 'permanently'
elif sup_hours >= 8760:
suppression_label = f'{sup_hours // 8760} year(s)'
elif sup_hours >= 720:
suppression_label = f'{sup_hours // 720} month(s)'
elif sup_hours >= 168:
suppression_label = f'{sup_hours // 168} week(s)'
elif sup_hours >= 72:
suppression_label = f'{sup_hours // 24} day(s)'
else:
suppression_label = f'{sup_hours} hours'
return jsonify({
'success': True,
'message': f'Error dismissed for {suppression_label}',
'error_key': error_key,
'original_severity': result.get('original_severity', 'WARNING'),
'category': category,
'suppression_hours': sup_hours,
'suppression_label': suppression_label,
'acknowledged_at': result.get('acknowledged_at')
})
else:
return jsonify({
'success': False,
'message': 'Error not found or already dismissed',
'error_key': error_key
}), 404
except Exception as e:
return jsonify({'error': str(e)}), 500
@@ -72,3 +131,470 @@ def get_active_errors():
return jsonify({'errors': errors})
except Exception as e:
return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/dismissed', methods=['GET'])
def get_dismissed_errors():
    """Return dismissed errors still inside their suppression window.

    The frontend renders these as INFO items with a 'Dismissed' badge.
    Response: {'dismissed': [...]} or {'error': str} with HTTP 500.
    """
    try:
        return jsonify({'dismissed': health_persistence.get_dismissed_errors()})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/full', methods=['GET'])
def get_full_health():
    """
    Get complete health data in a single request: detailed status + active errors + dismissed.
    Uses background-cached results if fresh (< 6 min) for instant response,
    otherwise runs a fresh check.
    """
    import time as _time
    try:
        # Try to use the background-cached detailed result for instant response
        bg_key = '_bg_detailed'
        bg_last = health_monitor.last_check_times.get(bg_key, 0)
        bg_age = _time.time() - bg_last
        # 360 s freshness window (cache is presumably refreshed ~every
        # 5 min by a background job — confirm against the monitor loop).
        if bg_age < 360 and bg_key in health_monitor.cached_results:
            # Use cached result (at most ~5 min old)
            details = health_monitor.cached_results[bg_key]
        else:
            # No fresh cache, run live (first load or cache expired)
            details = health_monitor.get_detailed_status()
        # Persistence reads are always live; only the detailed check is cached.
        active_errors = health_persistence.get_active_errors()
        dismissed = health_persistence.get_dismissed_errors()
        custom_suppressions = health_persistence.get_custom_suppressions()
        return jsonify({
            'health': details,
            'active_errors': active_errors,
            'dismissed': dismissed,
            'custom_suppressions': custom_suppressions,
            'timestamp': details.get('timestamp')
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/cleanup-orphans', methods=['POST'])
def cleanup_orphan_errors():
    """
    Clean up errors for devices that no longer exist in the system.
    Useful when USB drives or temporary devices are disconnected.
    """
    import os
    import re
    try:
        cleaned = []
        # Get all active disk errors
        disk_errors = health_persistence.get_active_errors(category='disks')
        for err in disk_errors:
            err_key = err.get('error_key', '')
            details = err.get('details', {})
            # details may be stored as a JSON string; decode best-effort.
            if isinstance(details, str):
                try:
                    import json as _json
                    details = _json.loads(details)
                except Exception:
                    details = {}
            device = details.get('device', '')
            base_disk = details.get('disk', '')
            # Try to determine the device path, preferring explicit
            # details over parsing the error key.
            dev_path = None
            if base_disk:
                dev_path = f'/dev/{base_disk}'
            elif device:
                dev_path = device if device.startswith('/dev/') else f'/dev/{device}'
            elif err_key.startswith('disk_'):
                # Extract device from error_key
                # (keys look like "disk_<name>_..." or "disk_fs_<name>_...")
                dev_name = err_key.replace('disk_fs_', '').replace('disk_', '')
                dev_name = re.sub(r'_.*$', '', dev_name)  # Remove suffix
                if dev_name:
                    dev_path = f'/dev/{dev_name}'
            if dev_path:
                # Also check base disk (remove partition number) so a
                # present parent disk keeps partition errors alive.
                base_path = re.sub(r'\d+$', '', dev_path)
                if not os.path.exists(dev_path) and not os.path.exists(base_path):
                    health_persistence.resolve_error(err_key, 'Device no longer present (manual cleanup)')
                    cleaned.append({'error_key': err_key, 'device': dev_path})
        # Also cleanup disk_observations for non-existent devices
        # (best-effort; failures here must not fail the request).
        try:
            health_persistence.cleanup_orphan_observations()
        except Exception:
            pass
        return jsonify({
            'success': True,
            'cleaned_count': len(cleaned),
            'cleaned_errors': cleaned
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/pending-notifications', methods=['GET'])
def get_pending_notifications():
    """Return events awaiting notification delivery.

    Consumed by the notification service (Telegram/Gotify/Discord
    integration). Response: {'pending': [...], 'count': int}.
    """
    try:
        pending = health_persistence.get_pending_notifications()
        return jsonify({'pending': pending, 'count': len(pending)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/mark-notified', methods=['POST'])
def mark_events_notified():
    """Mark events as notified after an alert was delivered successfully.

    Body (JSON): {'event_ids': [...]}. Returns 400 when the key is
    missing or not a list; otherwise {'success': True, 'marked_count': n}.
    """
    try:
        payload = request.get_json()
        if not payload or 'event_ids' not in payload:
            return jsonify({'error': 'event_ids array is required'}), 400
        event_ids = payload['event_ids']
        if not isinstance(event_ids, list):
            return jsonify({'error': 'event_ids must be an array'}), 400
        health_persistence.mark_events_notified(event_ids)
        return jsonify({'success': True, 'marked_count': len(event_ids)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/settings', methods=['GET'])
def get_health_settings():
    """Return per-category suppression duration settings.

    Response: {'categories': [...]} listing every health category with
    its currently configured suppression hours.
    """
    try:
        return jsonify({'categories': health_persistence.get_suppression_categories()})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/settings', methods=['POST'])
def save_health_settings():
    """
    Save per-category suppression duration settings.
    Expects JSON body with key-value pairs like: {"suppress_cpu": "168", "suppress_memory": "-1"}
    Valid values: 24, 72, 168, 720, 8760, -1 (permanent), or any positive integer for custom.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({'error': 'No settings provided'}), 400
        # Only accept keys declared in the persistence layer's map;
        # anything else is silently ignored rather than rejected.
        valid_keys = set(health_persistence.CATEGORY_SETTING_MAP.values())
        updated = []
        for key, value in data.items():
            if key not in valid_keys:
                continue
            try:
                hours = int(value)
                # Validate: must be -1 (permanent) or positive
                if hours != -1 and hours < 1:
                    continue
                health_persistence.set_setting(key, str(hours))
                updated.append(key)
            except (ValueError, TypeError):
                # Non-numeric value: skip this key, keep processing others.
                continue
        # Retroactively sync all existing dismissed errors
        # so changes are effective immediately, not just on next dismiss
        synced_count = health_persistence.sync_dismissed_suppression()
        return jsonify({
            'success': True,
            'updated': updated,
            'count': len(updated),
            'synced_dismissed': synced_count
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ── Remote Storage Exclusions Endpoints ──
@health_bp.route('/api/health/remote-storages', methods=['GET'])
def get_remote_storages():
    """
    Get list of all remote storages with their exclusion status.
    Remote storages are those that can be offline (PBS, NFS, CIFS, etc.)
    """
    try:
        # Imported lazily so a missing storage monitor degrades gracefully
        # (see the ImportError handler below) instead of breaking the module.
        from proxmox_storage_monitor import proxmox_storage_monitor

        # Get current storage status
        storage_status = proxmox_storage_monitor.get_storage_status()
        all_storages = storage_status.get('available', []) + storage_status.get('unavailable', [])

        # Filter to only remote types
        remote_types = health_persistence.REMOTE_STORAGE_TYPES
        remote_storages = [s for s in all_storages if s.get('type', '').lower() in remote_types]

        # Get current exclusions, keyed by storage name for O(1) lookup.
        exclusions = {e['storage_name']: e for e in health_persistence.get_excluded_storages()}

        # Combine info: live status merged with stored exclusion flags
        # (flags are stored as 0/1 integers, exposed here as booleans).
        result = []
        for storage in remote_storages:
            name = storage.get('name', '')
            exclusion = exclusions.get(name, {})
            result.append({
                'name': name,
                'type': storage.get('type', 'unknown'),
                'status': storage.get('status', 'unknown'),
                'total': storage.get('total', 0),
                'used': storage.get('used', 0),
                'available': storage.get('available', 0),
                'percent': storage.get('percent', 0),
                'exclude_health': exclusion.get('exclude_health', 0) == 1,
                'exclude_notifications': exclusion.get('exclude_notifications', 0) == 1,
                'excluded_at': exclusion.get('excluded_at'),
                'reason': exclusion.get('reason')
            })
        return jsonify({
            'storages': result,
            'remote_types': list(remote_types)
        })
    except ImportError:
        # Monitor module absent: report empty list with 200, not an error page.
        return jsonify({'error': 'Storage monitor not available', 'storages': []}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/storage-exclusions', methods=['GET'])
def get_storage_exclusions():
    """Return every stored storage exclusion as {'exclusions': [...]}."""
    try:
        return jsonify({'exclusions': health_persistence.get_excluded_storages()})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/storage-exclusions', methods=['POST'])
def save_storage_exclusion():
    """
    Add or update a storage exclusion.
    Request body:
    {
        "storage_name": "pbs-backup",
        "storage_type": "pbs",
        "exclude_health": true,
        "exclude_notifications": true,
        "reason": "PBS server is offline daily"
    }
    """
    try:
        data = request.get_json()
        if not data or 'storage_name' not in data:
            return jsonify({'error': 'storage_name is required'}), 400
        storage_name = data['storage_name']
        storage_type = data.get('storage_type', 'unknown')
        # Both flags default to True: an exclusion request suppresses
        # health checks and notifications unless told otherwise.
        exclude_health = data.get('exclude_health', True)
        exclude_notifications = data.get('exclude_notifications', True)
        reason = data.get('reason')

        # Check if already excluded — update vs insert use different
        # persistence calls (update does not change type/reason).
        existing = health_persistence.get_excluded_storages()
        exists = any(e['storage_name'] == storage_name for e in existing)

        if exists:
            # Update existing
            success = health_persistence.update_storage_exclusion(
                storage_name, exclude_health, exclude_notifications
            )
        else:
            # Add new
            success = health_persistence.exclude_storage(
                storage_name, storage_type, exclude_health, exclude_notifications, reason
            )

        if success:
            return jsonify({
                'success': True,
                'message': f'Storage {storage_name} exclusion saved',
                'storage_name': storage_name
            })
        else:
            return jsonify({'error': 'Failed to save exclusion'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/storage-exclusions/<storage_name>', methods=['DELETE'])
def delete_storage_exclusion(storage_name):
    """Remove a storage from the exclusion list.

    Returns 404 when the storage was not excluded in the first place.
    """
    try:
        if not health_persistence.remove_storage_exclusion(storage_name):
            return jsonify({'error': 'Storage not found in exclusions'}), 404
        return jsonify({
            'success': True,
            'message': f'Storage {storage_name} removed from exclusions'
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ═══════════════════════════════════════════════════════════════════════════
# NETWORK INTERFACE EXCLUSION ROUTES
# ═══════════════════════════════════════════════════════════════════════════
@health_bp.route('/api/health/interfaces', methods=['GET'])
def get_network_interfaces():
    """Get all network interfaces with their exclusion status.

    Combines live psutil stats (up/down, link speed, first IPv4 address)
    with stored health/notification exclusion flags. Loopback ('lo') is
    omitted. Result is sorted bridges first, then bonds, physical NICs,
    VLAN/veth, then others.
    """
    try:
        import psutil
        import socket  # for the named AF_INET constant

        # Get all interfaces
        net_if_stats = psutil.net_if_stats()
        net_if_addrs = psutil.net_if_addrs()

        # Get current exclusions, keyed by interface name for O(1) lookup.
        exclusions = {e['interface_name']: e for e in health_persistence.get_excluded_interfaces()}

        result = []
        for iface, stats in net_if_stats.items():
            if iface == 'lo':
                continue
            # Determine interface type from Proxmox/Linux naming conventions.
            if iface.startswith('vmbr'):
                iface_type = 'bridge'
            elif iface.startswith('bond'):
                iface_type = 'bond'
            elif iface.startswith(('vlan', 'veth')):
                iface_type = 'vlan'
            elif iface.startswith(('eth', 'ens', 'enp', 'eno')):
                iface_type = 'physical'
            else:
                iface_type = 'other'

            # First IPv4 address, if any.
            # Fix: compare against socket.AF_INET instead of the magic
            # number 2 (the value is platform-defined).
            ip_addr = None
            for addr in net_if_addrs.get(iface, []):
                if addr.family == socket.AF_INET:
                    ip_addr = addr.address
                    break

            exclusion = exclusions.get(iface, {})
            result.append({
                'name': iface,
                'type': iface_type,
                'is_up': stats.isup,
                'speed': stats.speed,
                'ip_address': ip_addr,
                'exclude_health': exclusion.get('exclude_health', 0) == 1,
                'exclude_notifications': exclusion.get('exclude_notifications', 0) == 1,
                'excluded_at': exclusion.get('excluded_at'),
                'reason': exclusion.get('reason')
            })

        # Sort: bridges first, then physical, then others
        type_order = {'bridge': 0, 'bond': 1, 'physical': 2, 'vlan': 3, 'other': 4}
        result.sort(key=lambda x: (type_order.get(x['type'], 5), x['name']))
        return jsonify({'interfaces': result})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/interface-exclusions', methods=['GET'])
def get_interface_exclusions():
    """Return every stored interface exclusion as {'exclusions': [...]}."""
    try:
        return jsonify({'exclusions': health_persistence.get_excluded_interfaces()})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/interface-exclusions', methods=['POST'])
def save_interface_exclusion():
    """
    Add or update an interface exclusion.
    Request body:
    {
        "interface_name": "vmbr0",
        "interface_type": "bridge",
        "exclude_health": true,
        "exclude_notifications": true,
        "reason": "Intentionally disabled bridge"
    }
    """
    try:
        data = request.get_json()
        if not data or 'interface_name' not in data:
            return jsonify({'error': 'interface_name is required'}), 400
        interface_name = data['interface_name']
        interface_type = data.get('interface_type', 'unknown')
        # Both flags default to True: an exclusion request suppresses
        # health checks and notifications unless told otherwise.
        exclude_health = data.get('exclude_health', True)
        exclude_notifications = data.get('exclude_notifications', True)
        reason = data.get('reason')

        # Check if already excluded — update vs insert use different
        # persistence calls (update does not change type/reason).
        existing = health_persistence.get_excluded_interfaces()
        exists = any(e['interface_name'] == interface_name for e in existing)

        if exists:
            # Update existing
            success = health_persistence.update_interface_exclusion(
                interface_name, exclude_health, exclude_notifications
            )
        else:
            # Add new
            success = health_persistence.exclude_interface(
                interface_name, interface_type, exclude_health, exclude_notifications, reason
            )

        if success:
            return jsonify({
                'success': True,
                'message': f'Interface {interface_name} exclusion saved',
                'interface_name': interface_name
            })
        else:
            return jsonify({'error': 'Failed to save exclusion'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@health_bp.route('/api/health/interface-exclusions/<interface_name>', methods=['DELETE'])
def delete_interface_exclusion(interface_name):
    """Remove an interface from the exclusion list; 404 if it was not excluded."""
    try:
        if not health_persistence.remove_interface_exclusion(interface_name):
            return jsonify({'error': 'Interface not found in exclusions'}), 404
        return jsonify({
            'success': True,
            'message': f'Interface {interface_name} removed from exclusions'
        })
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@@ -0,0 +1,996 @@
"""
Flask routes for notification service configuration and management.
Blueprint pattern matching flask_health_routes.py / flask_security_routes.py.
"""
import hmac
import time
import json
import hashlib
from pathlib import Path
from collections import deque
from flask import Blueprint, jsonify, request
from notification_manager import notification_manager
# ─── Webhook Hardening Helpers ───────────────────────────────────
class WebhookRateLimiter:
    """Sliding-window rate limiter for the webhook endpoint.

    Keeps the timestamps of accepted requests in a deque; a new request is
    allowed while fewer than ``max_requests`` fall inside the trailing
    ``window_seconds`` window.
    """

    def __init__(self, max_requests: int = 60, window_seconds: int = 60):
        self._max = max_requests
        self._window = window_seconds
        self._timestamps: deque = deque()

    def allow(self) -> bool:
        """Accept and record this request unless the window is already full."""
        now = time.time()
        cutoff = now - self._window
        # Drop timestamps that have aged out of the window.
        while self._timestamps and self._timestamps[0] < cutoff:
            self._timestamps.popleft()
        if len(self._timestamps) >= self._max:
            return False
        self._timestamps.append(now)
        return True
class ReplayCache:
    """Bounded in-memory cache of recently seen request signatures (60s TTL).

    Expired entries are pruned lazily once the cache is half full, and the
    _MAX_SIZE cap is enforced by evicting the oldest entries, so the cache
    cannot grow unbounded even under a flood of unique signatures.
    """

    _MAX_SIZE = 2000  # Hard cap to prevent memory growth

    def __init__(self, ttl: int = 60):
        self._ttl = ttl
        self._seen: dict = {}  # signature -> timestamp (insertion-ordered)

    def check_and_record(self, signature: str) -> bool:
        """Return True if this signature was already seen (replay). Records it otherwise."""
        now = time.time()
        # Periodic cleanup of expired entries.
        if len(self._seen) > self._MAX_SIZE // 2:
            cutoff = now - self._ttl
            self._seen = {k: v for k, v in self._seen.items() if v > cutoff}
        if signature in self._seen and now - self._seen[signature] < self._ttl:
            return True  # Replay detected
        # Enforce the hard cap: the original only pruned *expired* entries, so
        # >_MAX_SIZE unique signatures inside one TTL window grew the dict
        # without bound. Evict oldest first (dicts preserve insertion order).
        if len(self._seen) >= self._MAX_SIZE:
            overflow = len(self._seen) - self._MAX_SIZE + 1
            for key in list(self._seen)[:overflow]:
                del self._seen[key]
        self._seen[signature] = now
        return False
# Module-level singletons (one per process)
_webhook_limiter = WebhookRateLimiter(max_requests=60, window_seconds=60)
_replay_cache = ReplayCache(ttl=60)
# Timestamp validation window (seconds): remote webhook calls must carry an
# X-ProxMenux-Timestamp within this many seconds of server time (anti-replay).
_TIMESTAMP_MAX_DRIFT = 60
# Blueprint registered by the main Flask app; all routes below hang off it.
notification_bp = Blueprint('notifications', __name__)
@notification_bp.route('/api/notifications/settings', methods=['GET'])
def get_notification_settings():
    """Get all notification settings for the UI."""
    try:
        return jsonify(notification_manager.get_settings())
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@notification_bp.route('/api/notifications/settings', methods=['POST'])
def save_notification_settings():
    """Save notification settings from the UI."""
    try:
        body = request.get_json()
        if not body:
            return jsonify({'error': 'No data provided'}), 400
        return jsonify(notification_manager.save_settings(body))
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@notification_bp.route('/api/notifications/test', methods=['POST'])
def test_notification():
    """Send a test notification to one or all channels."""
    try:
        payload = request.get_json() or {}
        target = payload.get('channel', 'all')
        return jsonify(notification_manager.test_channel(target))
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
def load_verified_models():
    """Load verified models from config file.

    Checks multiple paths:
    1. Same directory as script (AppImage: /usr/bin/config/)
    2. Parent directory config folder (dev: AppImage/config/)

    Returns:
        dict parsed from verified_ai_models.json, or {} when the file is
        missing or unreadable (errors are printed, never raised).
    """
    try:
        # Try AppImage path first (scripts and config both in /usr/bin/)
        script_dir = Path(__file__).parent
        config_path = script_dir / 'config' / 'verified_ai_models.json'
        if not config_path.exists():
            # Try development path (AppImage/scripts/ -> AppImage/config/)
            config_path = script_dir.parent / 'config' / 'verified_ai_models.json'
        if config_path.exists():
            # Explicit encoding: JSON must be read as UTF-8 regardless of locale.
            with open(config_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        print(f"[flask_notification_routes] Config not found at {config_path}")
    except Exception as e:
        print(f"[flask_notification_routes] Failed to load verified models: {e}")
    return {}
@notification_bp.route('/api/notifications/provider-models', methods=['POST'])
def get_provider_models():
    """Fetch available models from AI provider, filtered by verified models list.

    Only returns models that:
    1. Are available from the provider's API
    2. Are in our verified_ai_models.json list (tested to work)

    Request body:
    {
      "provider": "gemini|groq|openai|openrouter|ollama|anthropic",
      "api_key": "your-api-key",               // Not needed for ollama
      "ollama_url": "http://localhost:11434",  // Only for ollama
      "openai_base_url": "https://custom.endpoint/v1"  // Optional for openai
    }

    Returns:
    {
      "success": true/false,
      "models": ["model1", "model2", ...],
      "recommended": "recommended-model",
      "message": "status message"
    }

    Special cases handled below:
    - ollama: queried locally via its /api/tags endpoint; no key, no filtering.
    - anthropic: has no models-list API; the verified list is returned as-is.
    - Any other provider whose API call fails falls back to the verified list.
    All failures are reported with success=false and HTTP 200 (no 4xx/5xx).
    """
    try:
        data = request.get_json() or {}
        provider = data.get('provider', '')
        api_key = data.get('api_key', '')
        ollama_url = data.get('ollama_url', 'http://localhost:11434')
        openai_base_url = data.get('openai_base_url', '')
        if not provider:
            return jsonify({'success': False, 'models': [], 'message': 'Provider not specified'})
        # Load verified models config (empty dict when the file is absent).
        verified_config = load_verified_models()
        provider_config = verified_config.get(provider, {})
        verified_models = set(provider_config.get('models', []))
        recommended = provider_config.get('recommended', '')
        # Handle Ollama separately (local daemon, no filtering)
        if provider == 'ollama':
            import urllib.request
            import urllib.error
            url = f"{ollama_url.rstrip('/')}/api/tags"
            req = urllib.request.Request(url, method='GET')
            req.add_header('User-Agent', 'ProxMenux-Monitor/1.1')
            with urllib.request.urlopen(req, timeout=10) as resp:
                result = json.loads(resp.read().decode('utf-8'))
            models = [m.get('name', '') for m in result.get('models', []) if m.get('name')]
            models = sorted(models)
            return jsonify({
                'success': True,
                'models': models,
                'recommended': models[0] if models else '',
                'message': f'Found {len(models)} local models'
            })
        # Handle Anthropic - no models list API, return verified models directly
        # (hard-coded fallbacks only used when the verified config is missing).
        if provider == 'anthropic':
            models = list(verified_models) if verified_models else [
                'claude-3-5-haiku-latest',
                'claude-3-5-sonnet-latest',
                'claude-3-opus-latest',
            ]
            return jsonify({
                'success': True,
                'models': sorted(models),
                'recommended': recommended or models[0],
                'message': f'{len(models)} verified models'
            })
        # For other providers, fetch from API and filter by verified list
        if not api_key:
            return jsonify({'success': False, 'models': [], 'message': 'API key required'})
        from ai_providers import get_provider
        # base_url is only meaningful for openai (custom-compatible endpoints).
        ai_provider = get_provider(
            provider,
            api_key=api_key,
            model='',
            base_url=openai_base_url if provider == 'openai' else None
        )
        if not ai_provider:
            return jsonify({'success': False, 'models': [], 'message': f'Unknown provider: {provider}'})
        # Get all models from provider API
        api_models = ai_provider.list_models()
        if not api_models:
            # API failed, fall back to verified list only
            if verified_models:
                models = sorted(verified_models)
                return jsonify({
                    'success': True,
                    'models': models,
                    'recommended': recommended or models[0],
                    'message': f'{len(models)} verified models (API unavailable)'
                })
            return jsonify({
                'success': False,
                'models': [],
                'message': 'Could not retrieve models. Check your API key.'
            })
        # Filter: only models that are BOTH in API and verified list
        if verified_models:
            api_models_set = set(api_models)
            filtered_models = [m for m in verified_models if m in api_models_set]
            if not filtered_models:
                # No intersection - maybe verified list is outdated
                # Return verified list anyway (will fail on use if truly unavailable)
                filtered_models = list(verified_models)
            # Sort with recommended first, the rest alphabetically.
            def sort_key(m):
                if m == recommended:
                    return (0, m)
                return (1, m)
            models = sorted(filtered_models, key=sort_key)
        else:
            # No verified list for this provider, return all from API
            models = sorted(api_models)
        return jsonify({
            'success': True,
            'models': models,
            'recommended': recommended if recommended in models else (models[0] if models else ''),
            'message': f'{len(models)} verified models available'
        })
    except Exception as e:
        # Errors (network, parsing, provider) surface as success=false, HTTP 200.
        return jsonify({
            'success': False,
            'models': [],
            'message': f'Error: {str(e)}'
        })
@notification_bp.route('/api/notifications/test-ai', methods=['POST'])
def test_ai_connection():
    """Test AI provider connection and configuration.

    Request body:
    {
      "provider": "groq" | "openai" | "anthropic" | "gemini" | "ollama" | "openrouter",
      "api_key": "...",
      "model": "..." (optional),
      "ollama_url": "http://localhost:11434" (optional, for ollama)
    }

    Returns:
    {
      "success": true/false,
      "message": "Connection successful" or error message,
      "model": "model used for test"
    }
    """
    try:
        body = request.get_json() or {}
        provider = body.get('provider', 'groq')
        api_key = body.get('api_key', '')
        model = body.get('model', '')
        ollama_url = body.get('ollama_url', 'http://localhost:11434')
        openai_base_url = body.get('openai_base_url', '')

        # Per-provider required fields: every provider but ollama needs a key;
        # ollama needs a daemon URL instead.
        if provider != 'ollama' and not api_key:
            return jsonify({'success': False, 'message': 'API key is required', 'model': ''}), 400
        if provider == 'ollama' and not ollama_url:
            return jsonify({'success': False, 'message': 'Ollama URL is required', 'model': ''}), 400

        # Make sure ai_providers (which lives next to this file) is importable.
        import os
        import sys
        here = os.path.dirname(os.path.abspath(__file__))
        if here not in sys.path:
            sys.path.insert(0, here)
        from ai_providers import get_provider, AIProviderError

        # Pick the base URL: ollama talks to its own daemon; openai may use a
        # custom compatible endpoint (empty string means the default API).
        if provider == 'ollama':
            base_url = ollama_url
        elif provider == 'openai':
            base_url = openai_base_url  # Empty string means use default OpenAI API
        else:
            base_url = ''

        try:
            client = get_provider(provider, api_key=api_key, model=model, base_url=base_url)
            return jsonify(client.test_connection())
        except AIProviderError as exc:
            return jsonify({'success': False, 'message': str(exc), 'model': model}), 400
    except Exception as exc:
        return jsonify({'success': False, 'message': f'Unexpected error: {str(exc)}', 'model': ''}), 500
@notification_bp.route('/api/notifications/status', methods=['GET'])
def get_notification_status():
    """Get notification service status."""
    try:
        return jsonify(notification_manager.get_status())
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@notification_bp.route('/api/notifications/history', methods=['GET'])
def get_notification_history():
    """Get notification history with optional severity/channel filters."""
    try:
        history = notification_manager.get_history(
            request.args.get('limit', 100, type=int),
            request.args.get('offset', 0, type=int),
            request.args.get('severity', ''),
            request.args.get('channel', ''),
        )
        return jsonify(history)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@notification_bp.route('/api/notifications/history', methods=['DELETE'])
def clear_notification_history():
    """Clear all notification history."""
    try:
        return jsonify(notification_manager.clear_history())
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@notification_bp.route('/api/notifications/send', methods=['POST'])
def send_notification():
    """Send a notification via API (for testing or external triggers)."""
    try:
        body = request.get_json()
        if not body:
            return jsonify({'error': 'No data provided'}), 400
        outcome = notification_manager.send_notification(
            event_type=body.get('event_type', 'custom'),
            severity=body.get('severity', 'INFO'),
            title=body.get('title', ''),
            message=body.get('message', ''),
            data=body.get('data', {}),
            source='api'
        )
        return jsonify(outcome)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
# ── PVE config constants ──
# Identifiers for the webhook endpoint + matcher blocks that ProxMenux manages
# inside Proxmox VE's notification config. Only blocks with these exact names
# are ever touched by the setup/cleanup helpers below.
_PVE_ENDPOINT_ID = 'proxmenux-webhook'
_PVE_MATCHER_ID = 'proxmenux-default'
# Local ProxMenux Flask endpoint that PVE will POST notifications to.
_PVE_WEBHOOK_URL = 'http://127.0.0.1:8008/api/notifications/webhook'
_PVE_NOTIFICATIONS_CFG = '/etc/pve/notifications.cfg'
_PVE_PRIV_CFG = '/etc/pve/priv/notifications.cfg'
# Header lines (exactly as written in the cfg files) that mark OUR blocks.
_PVE_OUR_HEADERS = {
    f'webhook: {_PVE_ENDPOINT_ID}',
    f'matcher: {_PVE_MATCHER_ID}',
}
def _pve_read_file(path):
"""Read file, return (content, error). Content is '' if missing."""
try:
with open(path, 'r') as f:
return f.read(), None
except FileNotFoundError:
return '', None
except PermissionError:
return None, f'Permission denied reading {path}'
except Exception as e:
return None, str(e)
def _pve_backup_file(path):
"""Create timestamped backup if file exists. Never fails fatally."""
import os, shutil
from datetime import datetime
try:
if os.path.exists(path):
ts = datetime.now().strftime('%Y%m%d_%H%M%S')
backup = f"{path}.proxmenux_backup_{ts}"
shutil.copy2(path, backup)
except Exception:
pass
def _pve_remove_our_blocks(text, headers_to_remove):
"""Remove only blocks whose header line matches one of ours.
Preserves ALL other content byte-for-byte.
A block = header line + indented continuation lines + trailing blank line.
"""
lines = text.splitlines(keepends=True)
cleaned = []
skip_block = False
for line in lines:
stripped = line.strip()
if stripped and not line[0:1].isspace() and ':' in stripped:
if stripped in headers_to_remove:
skip_block = True
continue
else:
skip_block = False
if skip_block:
if not stripped:
skip_block = False
continue
elif line[0:1].isspace():
continue
else:
skip_block = False
cleaned.append(line)
return ''.join(cleaned)
def _build_webhook_fallback():
    """Build the manual command list shown when we cannot write PVE configs."""
    import base64
    body_tpl = '{"title":"{{ escape title }}","message":"{{ escape message }}","severity":"{{ severity }}","timestamp":"{{ timestamp }}","fields":{{ json fields }}}'
    # PVE stores the webhook body template base64-encoded in the config file.
    encoded = base64.b64encode(body_tpl.encode()).decode()
    return [
        "# 1. Append to END of /etc/pve/notifications.cfg",
        "# (do NOT delete existing content):",
        "",
        f"webhook: {_PVE_ENDPOINT_ID}",
        f"\tbody {encoded}",
        "\tmethod post",
        f"\turl {_PVE_WEBHOOK_URL}",
        "",
        f"matcher: {_PVE_MATCHER_ID}",
        f"\ttarget {_PVE_ENDPOINT_ID}",
        "\tmode all",
        "",
        "# 2. Append to /etc/pve/priv/notifications.cfg :",
        f"webhook: {_PVE_ENDPOINT_ID}",
    ]
def setup_pve_webhook_core() -> dict:
    """Core logic to configure PVE webhook. Callable from anywhere.

    Appends a `webhook:` endpoint block and a `matcher:` block to
    /etc/pve/notifications.cfg (after removing any earlier proxmenux-owned
    blocks) and writes a matching secret-less `webhook:` block to
    /etc/pve/priv/notifications.cfg, pointing PVE at this service's
    /api/notifications/webhook endpoint.

    Returns dict with 'configured', 'error', 'fallback_commands' keys;
    'fallback_commands' carries manual instructions when write access fails.
    Idempotent: safe to call multiple times. Backups are taken before any
    modification, and the main config is rolled back on a failed write.
    """
    import secrets as secrets_mod
    result = {
        'configured': False,
        'endpoint_id': _PVE_ENDPOINT_ID,
        'matcher_id': _PVE_MATCHER_ID,
        'url': _PVE_WEBHOOK_URL,
        'fallback_commands': [],
        'error': None,
    }
    try:
        # ── Step 1: Ensure webhook secret exists (for our own internal use) ──
        secret = notification_manager.get_webhook_secret()
        if not secret:
            secret = secrets_mod.token_urlsafe(32)
            # NOTE(review): reaches into notification_manager's private
            # _save_setting — a public setter on the manager would be cleaner.
            notification_manager._save_setting('webhook_secret', secret)
        # ── Step 2: Read main config ──
        cfg_text, err = _pve_read_file(_PVE_NOTIFICATIONS_CFG)
        if err:
            result['error'] = err
            result['fallback_commands'] = _build_webhook_fallback()
            return result
        # ── Step 3: Read priv config (to clean up any broken blocks we wrote before) ──
        # priv_text=None means "unreadable"; priv handling is skipped below.
        priv_text, err = _pve_read_file(_PVE_PRIV_CFG)
        if err:
            priv_text = None
        # ── Step 4: Create backups before ANY modification ──
        _pve_backup_file(_PVE_NOTIFICATIONS_CFG)
        if priv_text is not None:
            _pve_backup_file(_PVE_PRIV_CFG)
        # ── Step 5: Remove any previous proxmenux blocks from BOTH files ──
        # (cleaned_priv is only defined when priv_text was readable; every
        # later use is guarded by the same `priv_text is not None` check.)
        cleaned_cfg = _pve_remove_our_blocks(cfg_text, _PVE_OUR_HEADERS)
        if priv_text is not None:
            cleaned_priv = _pve_remove_our_blocks(priv_text, _PVE_OUR_HEADERS)
        # ── Step 6: Build new blocks ──
        # Exact format from a real working PVE server:
        #   webhook: name
        #   \tmethod post
        #   \turl http://...
        #
        # NO header lines -- localhost webhook doesn't need them.
        # PVE header format is: header name=X-Key,value=<base64>
        # PVE secret format is: secret name=key,value=<base64>
        # Neither is needed for localhost calls.
        # PVE stores body as base64 in the config file.
        # {{ escape title/message }} -- JSON-safe escaping of quotes/newlines.
        # {{ json fields }} -- renders ALL PVE metadata as a JSON object
        # (type, hostname, job-id). This is a single Handlebars helper
        # that always works, even if fields is empty (renders {}).
        import base64
        body_template = '{"title":"{{ escape title }}","message":"{{ escape message }}","severity":"{{ severity }}","timestamp":"{{ timestamp }}","fields":{{ json fields }}}'
        body_b64 = base64.b64encode(body_template.encode()).decode()
        endpoint_block = (
            f"webhook: {_PVE_ENDPOINT_ID}\n"
            f"\tbody {body_b64}\n"
            f"\tmethod post\n"
            f"\turl {_PVE_WEBHOOK_URL}\n"
        )
        matcher_block = (
            f"matcher: {_PVE_MATCHER_ID}\n"
            f"\ttarget {_PVE_ENDPOINT_ID}\n"
            f"\tmode all\n"
        )
        # ── Step 7: Append our blocks to cleaned main config ──
        # Ensure exactly one blank line separates existing content from ours.
        if cleaned_cfg and not cleaned_cfg.endswith('\n'):
            cleaned_cfg += '\n'
        if cleaned_cfg and not cleaned_cfg.endswith('\n\n'):
            cleaned_cfg += '\n'
        new_cfg = cleaned_cfg + endpoint_block + '\n' + matcher_block
        # ── Step 8: Write main config ──
        try:
            with open(_PVE_NOTIFICATIONS_CFG, 'w') as f:
                f.write(new_cfg)
        except PermissionError:
            result['error'] = f'Permission denied writing {_PVE_NOTIFICATIONS_CFG}'
            result['fallback_commands'] = _build_webhook_fallback()
            return result
        except Exception as e:
            # Best-effort rollback to the original content on a partial write.
            try:
                with open(_PVE_NOTIFICATIONS_CFG, 'w') as f:
                    f.write(cfg_text)
            except Exception:
                pass
            result['error'] = str(e)
            result['fallback_commands'] = _build_webhook_fallback()
            return result
        # ── Step 9: Write priv config with our webhook entry ──
        # PVE REQUIRES a matching block in priv/notifications.cfg for every
        # webhook endpoint, even if it has no secrets. Without it PVE throws:
        # "Could not instantiate endpoint: private config does not exist"
        priv_block = (
            f"webhook: {_PVE_ENDPOINT_ID}\n"
        )
        if priv_text is not None:
            # Start from cleaned priv (our old blocks removed)
            if cleaned_priv and not cleaned_priv.endswith('\n'):
                cleaned_priv += '\n'
            if cleaned_priv and not cleaned_priv.endswith('\n\n'):
                cleaned_priv += '\n'
            new_priv = cleaned_priv + priv_block
        else:
            new_priv = priv_block
        try:
            with open(_PVE_PRIV_CFG, 'w') as f:
                f.write(new_priv)
        except PermissionError:
            result['error'] = f'Permission denied writing {_PVE_PRIV_CFG}'
            result['fallback_commands'] = _build_webhook_fallback()
            return result
        except Exception:
            # Non-permission priv failures are swallowed: the main config is
            # already written and PVE may still work. TODO confirm intent.
            pass
        result['configured'] = True
        result['secret'] = secret
        return result
    except Exception as e:
        result['error'] = str(e)
        result['fallback_commands'] = _build_webhook_fallback()
        return result
@notification_bp.route('/api/notifications/proxmox/setup-webhook', methods=['POST'])
def setup_proxmox_webhook():
    """HTTP endpoint wrapper for webhook setup."""
    outcome = setup_pve_webhook_core()
    return jsonify(outcome), 200
def cleanup_pve_webhook_core() -> dict:
    """Core logic to remove PVE webhook blocks. Callable from anywhere.

    Returns dict with 'cleaned', 'error' keys.
    Only removes blocks named 'proxmenux-webhook' / 'proxmenux-default';
    all other content in both config files is preserved byte-for-byte.
    Reports cleaned=True without touching anything when none of our blocks
    are present. Backups are taken before modification; the main config is
    rolled back if its rewrite fails partway.
    """
    result = {'cleaned': False, 'error': None}
    try:
        # Read both files
        cfg_text, err = _pve_read_file(_PVE_NOTIFICATIONS_CFG)
        if err:
            result['error'] = err
            return result
        # Priv config unreadable => skip it, still clean the main config.
        priv_text, err = _pve_read_file(_PVE_PRIV_CFG)
        if err:
            priv_text = None
        # Check if our blocks actually exist before doing anything
        has_our_blocks = any(
            h in cfg_text for h in [f'webhook: {_PVE_ENDPOINT_ID}', f'matcher: {_PVE_MATCHER_ID}']
        )
        has_priv_blocks = priv_text and f'webhook: {_PVE_ENDPOINT_ID}' in priv_text
        if not has_our_blocks and not has_priv_blocks:
            result['cleaned'] = True
            return result
        # Backup before modification
        _pve_backup_file(_PVE_NOTIFICATIONS_CFG)
        if priv_text is not None:
            _pve_backup_file(_PVE_PRIV_CFG)
        # Remove our blocks
        if has_our_blocks:
            cleaned_cfg = _pve_remove_our_blocks(cfg_text, _PVE_OUR_HEADERS)
            try:
                with open(_PVE_NOTIFICATIONS_CFG, 'w') as f:
                    f.write(cleaned_cfg)
            except PermissionError:
                result['error'] = f'Permission denied writing {_PVE_NOTIFICATIONS_CFG}'
                return result
            except Exception as e:
                # Rollback: restore the original content on a partial write.
                try:
                    with open(_PVE_NOTIFICATIONS_CFG, 'w') as f:
                        f.write(cfg_text)
                except Exception:
                    pass
                result['error'] = str(e)
                return result
        if has_priv_blocks and priv_text is not None:
            cleaned_priv = _pve_remove_our_blocks(priv_text, _PVE_OUR_HEADERS)
            try:
                with open(_PVE_PRIV_CFG, 'w') as f:
                    f.write(cleaned_priv)
            except Exception:
                pass  # Best-effort
        result['cleaned'] = True
        return result
    except Exception as e:
        result['error'] = str(e)
        return result
@notification_bp.route('/api/notifications/proxmox/cleanup-webhook', methods=['POST'])
def cleanup_proxmox_webhook():
    """HTTP endpoint wrapper for webhook cleanup."""
    outcome = cleanup_pve_webhook_core()
    return jsonify(outcome), 200
@notification_bp.route('/api/notifications/proxmox/read-cfg', methods=['GET'])
def read_pve_notification_cfg():
    """Diagnostic: return raw content of PVE notification config files.

    GET /api/notifications/proxmox/read-cfg
    Returns both notifications.cfg and priv/notifications.cfg content,
    plus any proxmenux_backup files found next to them.
    """
    import os

    targets = {
        'notifications_cfg': '/etc/pve/notifications.cfg',
        'priv_cfg': '/etc/pve/priv/notifications.cfg',
    }
    out = {}
    for key, path in targets.items():
        try:
            with open(path, 'r') as fh:
                out[key] = {
                    'path': path,
                    'content': fh.read(),
                    'size': os.path.getsize(path),
                    'error': None,
                }
        except FileNotFoundError:
            out[key] = {'path': path, 'content': None, 'size': 0, 'error': 'file_not_found'}
        except PermissionError:
            out[key] = {'path': path, 'content': None, 'size': 0, 'error': 'permission_denied'}
        except Exception as exc:
            out[key] = {'path': path, 'content': None, 'size': 0, 'error': str(exc)}

    # Collect any backups we previously created alongside the configs.
    backups = []
    for directory in ('/etc/pve', '/etc/pve/priv'):
        try:
            for fname in sorted(os.listdir(directory)):
                if 'proxmenux_backup' not in fname:
                    continue
                fpath = os.path.join(directory, fname)
                try:
                    with open(fpath, 'r') as fh:
                        backups.append({
                            'path': fpath,
                            'content': fh.read(),
                            'size': os.path.getsize(fpath),
                        })
                except Exception:
                    backups.append({'path': fpath, 'content': None, 'error': 'read_failed'})
        except Exception:
            pass
    out['backups'] = backups
    return jsonify(out), 200
@notification_bp.route('/api/notifications/proxmox/restore-cfg', methods=['POST'])
def restore_pve_notification_cfg():
    """Restore PVE notification config from our backup.

    POST /api/notifications/proxmox/restore-cfg
    Finds the most recent proxmenux_backup for each config and restores it.
    """
    import os
    import shutil

    targets = {
        '/etc/pve': '/etc/pve/notifications.cfg',
        '/etc/pve/priv': '/etc/pve/priv/notifications.cfg',
    }
    restored, errors = [], []
    for search_dir, target_path in targets.items():
        try:
            # Newest first: the timestamp suffix sorts lexicographically.
            backups = sorted(
                (f for f in os.listdir(search_dir)
                 if 'proxmenux_backup' in f and f.startswith('notifications.cfg')),
                reverse=True,
            )
            if not backups:
                errors.append({'target': target_path, 'error': 'no_backup_found'})
                continue
            source = os.path.join(search_dir, backups[0])
            shutil.copy2(source, target_path)
            restored.append({'target': target_path, 'from_backup': source})
        except Exception as exc:
            errors.append({'target': target_path, 'error': str(exc)})
    return jsonify({
        'restored': restored,
        'errors': errors,
        'success': len(errors) == 0 and len(restored) > 0,
    }), 200
@notification_bp.route('/api/notifications/webhook', methods=['POST'])
def proxmox_webhook():
    """Receive native Proxmox VE notification webhooks (hardened).

    Security layers:
      Localhost (127.0.0.1 / ::1): rate limiting only.
        PVE calls us on localhost and cannot send custom auth headers,
        so we trust the loopback interface (only local processes can reach it).
      Remote: rate limiting + shared secret + timestamp + replay + IP allowlist.

    Once past the security layers this handler always answers HTTP 200 --
    PVE flags the webhook target as broken on any non-200, so processing
    errors are reported via the 'accepted' field in the JSON body instead.
    """
    # NOTE(review): the first parameter of _reject is unused — callers pass
    # the status code twice. Harmless, but the lambda could take (error, status).
    _reject = lambda code, error, status: (jsonify({'accepted': False, 'error': error}), status)
    client_ip = request.remote_addr or ''
    is_localhost = client_ip in ('127.0.0.1', '::1')
    # ── Layer 1: Rate limiting (always) ──
    if not _webhook_limiter.allow():
        resp = jsonify({'accepted': False, 'error': 'rate_limited'})
        resp.headers['Retry-After'] = '60'
        return resp, 429
    # ── Layers 2-5: Remote-only checks ──
    if not is_localhost:
        # Layer 2: Shared secret (only enforced when one is configured)
        try:
            configured_secret = notification_manager.get_webhook_secret()
        except Exception:
            configured_secret = ''
        if configured_secret:
            request_secret = request.headers.get('X-Webhook-Secret', '')
            if not request_secret:
                return _reject(401, 'missing_secret', 401)
            # Constant-time comparison to avoid timing side channels.
            if not hmac.compare_digest(configured_secret, request_secret):
                return _reject(401, 'invalid_secret', 401)
        # Layer 3: Anti-replay timestamp (must be within _TIMESTAMP_MAX_DRIFT s)
        ts_header = request.headers.get('X-ProxMenux-Timestamp', '')
        if not ts_header:
            return _reject(401, 'missing_timestamp', 401)
        try:
            ts_value = int(ts_header)
        except (ValueError, TypeError):
            return _reject(401, 'invalid_timestamp', 401)
        if abs(time.time() - ts_value) > _TIMESTAMP_MAX_DRIFT:
            return _reject(401, 'timestamp_expired', 401)
        # Layer 4: Replay cache -- same timestamp+body hash seen within TTL
        raw_body = request.get_data(as_text=True) or ''
        signature = hashlib.sha256(f"{ts_value}:{raw_body}".encode(errors='replace')).hexdigest()
        if _replay_cache.check_and_record(signature):
            return _reject(409, 'replay_detected', 409)
        # Layer 5: IP allowlist (an empty allowlist permits any IP)
        try:
            allowed_ips = notification_manager.get_webhook_allowed_ips()
            if allowed_ips and client_ip not in allowed_ips:
                return _reject(403, 'forbidden_ip', 403)
        except Exception:
            pass
    # ── Parse and process payload ──
    try:
        # NOTE(review): content_type is currently assigned but never used.
        content_type = request.content_type or ''
        raw_data = request.get_data(as_text=True) or ''
        # Try JSON first
        payload = request.get_json(silent=True) or {}
        # If not JSON, try form data
        if not payload:
            payload = dict(request.form)
        # If still empty, try parsing raw data as JSON (PVE may not set Content-Type)
        if not payload and raw_data:
            import json
            try:
                payload = json.loads(raw_data)
            except (json.JSONDecodeError, ValueError):
                # PVE's {{ message }} may contain unescaped newlines/quotes
                # that break JSON. Try to repair common issues.
                try:
                    repaired = raw_data.replace('\n', '\\n').replace('\r', '\\r')
                    payload = json.loads(repaired)
                except (json.JSONDecodeError, ValueError):
                    # Try to extract fields with regex from broken JSON
                    import re
                    title_m = re.search(r'"title"\s*:\s*"([^"]*)"', raw_data)
                    sev_m = re.search(r'"severity"\s*:\s*"([^"]*)"', raw_data)
                    if title_m:
                        payload = {
                            'title': title_m.group(1),
                            'body': raw_data[:1000],
                            'severity': sev_m.group(1) if sev_m else 'info',
                            'source': 'proxmox_hook',
                        }
        # If still empty, try to salvage data from raw body
        if not payload:
            if raw_data:
                # Last resort: treat raw text as the message body
                payload = {
                    'title': 'PVE Notification',
                    'body': raw_data[:1000],
                    'severity': 'info',
                    'source': 'proxmox_hook',
                }
            else:
                return _reject(400, 'empty_payload', 400)
        result = notification_manager.process_webhook(payload)
        # Always return 200 to PVE -- a non-200 makes PVE report the webhook as broken.
        # The 'accepted' field in the JSON body indicates actual processing status.
        return jsonify(result), 200
    except Exception as e:
        # Still return 200 to avoid PVE flagging the webhook as broken
        return jsonify({'accepted': False, 'error': 'internal_error', 'detail': str(e)}), 200
# ─── Internal Shutdown Event Endpoint ─────────────────────────────
@notification_bp.route('/api/internal/shutdown-event', methods=['POST'])
def internal_shutdown_event():
"""
Internal endpoint called by systemd ExecStop script to emit shutdown/reboot notification.
This allows the service to send a notification BEFORE it terminates.
Only accepts requests from localhost (127.0.0.1) for security.
"""
# Security: Only allow localhost
remote_addr = request.remote_addr
if remote_addr not in ('127.0.0.1', '::1', 'localhost'):
return jsonify({'error': 'forbidden', 'detail': 'localhost only'}), 403
try:
data = request.get_json(silent=True) or {}
event_type = data.get('event_type', 'system_shutdown')
hostname = data.get('hostname', 'unknown')
reason = data.get('reason', 'System is shutting down.')
# Validate event type
if event_type not in ('system_shutdown', 'system_reboot'):
return jsonify({'error': 'invalid_event_type'}), 400
# Emit the notification directly through notification_manager
notification_manager.emit_event(
event_type=event_type,
severity='INFO',
data={
'hostname': hostname,
'reason': reason,
},
source='systemd',
entity='node',
entity_id='',
)
return jsonify({'success': True, 'event_type': event_type}), 200
except Exception as e:
return jsonify({'error': 'internal_error', 'detail': str(e)}), 500
+545
View File
@@ -0,0 +1,545 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ProxMenux OCI Routes
REST API endpoints for OCI container app management.
"""
import logging
from flask import Blueprint, jsonify, request
import oci_manager
from jwt_middleware import require_auth
# Logging
logger = logging.getLogger("proxmenux.oci.routes")
# Blueprint
oci_bp = Blueprint("oci", __name__, url_prefix="/api/oci")
# =================================================================
# Catalog Endpoints
# =================================================================
@oci_bp.route("/catalog", methods=["GET"])
@require_auth
def get_catalog():
    """
    List all available apps from the catalog.
    Returns:
        List of apps with basic info and installation status.
    """
    try:
        return jsonify({"success": True, "apps": oci_manager.list_available_apps()})
    except Exception as exc:
        logger.error(f"Failed to get catalog: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/catalog/<app_id>", methods=["GET"])
@require_auth
def get_app_definition(app_id: str):
    """
    Get the full definition for a specific app.
    Args:
        app_id: The app identifier
    Returns:
        Full app definition including config schema, plus installed flag.
    """
    try:
        definition = oci_manager.get_app_definition(app_id)
        if not definition:
            return jsonify({
                "success": False,
                "message": f"App '{app_id}' not found in catalog"
            }), 404
        return jsonify({
            "success": True,
            "app": definition,
            "installed": oci_manager.is_installed(app_id)
        })
    except Exception as exc:
        logger.error(f"Failed to get app definition: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/storages", methods=["GET"])
@require_auth
def get_storages():
    """
    Get list of available storages for LXC rootfs.
    Returns:
        List of storages with capacity info and recommendations.
    """
    try:
        return jsonify({"success": True, "storages": oci_manager.get_available_storages()})
    except Exception as exc:
        logger.error(f"Failed to get storages: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/catalog/<app_id>/schema", methods=["GET"])
@require_auth
def get_app_schema(app_id: str):
    """
    Get only the config schema for an app.
    Args:
        app_id: The app identifier
    Returns:
        Config schema for building dynamic forms.
    """
    try:
        definition = oci_manager.get_app_definition(app_id)
        if not definition:
            return jsonify({
                "success": False,
                "message": f"App '{app_id}' not found in catalog"
            }), 404
        return jsonify({
            "success": True,
            "app_id": app_id,
            "name": definition.get("name", app_id),
            "schema": definition.get("config_schema", {})
        })
    except Exception as exc:
        logger.error(f"Failed to get app schema: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
# =================================================================
# Installed Apps Endpoints
# =================================================================
@oci_bp.route("/installed", methods=["GET"])
@require_auth
def list_installed():
    """
    List all installed apps with their current status.

    Returns:
        JSON with ``instances``: list of installed apps with status info.
    """
    try:
        instances = oci_manager.list_installed_apps()
        return jsonify({"success": True, "instances": instances})
    except Exception as exc:
        logger.error(f"Failed to list installed apps: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/installed/<app_id>", methods=["GET"])
@require_auth
def get_installed_app(app_id: str):
    """
    Get details of an installed app including current status.

    Args:
        app_id: The app identifier

    Returns:
        Installed app details with container info and status.
    """
    try:
        instance = oci_manager.get_installed_app(app_id)
        if not instance:
            payload = {"success": False, "message": f"App '{app_id}' is not installed"}
            return jsonify(payload), 404
        return jsonify({"success": True, "instance": instance})
    except Exception as exc:
        logger.error(f"Failed to get installed app: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/installed/<app_id>/logs", methods=["GET"])
@require_auth
def get_app_logs(app_id: str):
    """
    Get recent logs from an app's container.

    Args:
        app_id: The app identifier

    Query params:
        lines: Number of lines to return (default 100)

    Returns:
        Container logs.
    """
    try:
        line_count = request.args.get("lines", 100, type=int)
        result = oci_manager.get_app_logs(app_id, lines=line_count)
        if result.get("success"):
            return jsonify(result)
        # "not installed" in the manager message distinguishes 404 from 500.
        status = 404 if "not installed" in result.get("message", "") else 500
        return jsonify(result), status
    except Exception as exc:
        logger.error(f"Failed to get app logs: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
# =================================================================
# Deployment Endpoint
# =================================================================
@oci_bp.route("/deploy", methods=["POST"])
@require_auth
def deploy_app():
    """
    Deploy an OCI app with the given configuration.

    Body:
        {
            "app_id": "secure-gateway",
            "config": {
                "auth_key": "tskey-auth-xxx",
                "hostname": "proxmox-gateway",
                ...
            }
        }

    Returns:
        Deployment result with container ID if successful.
    """
    try:
        # silent=True: a missing or non-JSON body yields None instead of
        # raising BadRequest/UnsupportedMediaType, which the broad except
        # below would otherwise turn into a misleading 500.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({
                "success": False,
                "message": "Request body is required"
            }), 400
        app_id = data.get("app_id")
        config = data.get("config", {})
        if not app_id:
            return jsonify({
                "success": False,
                "message": "app_id is required"
            }), 400
        if not isinstance(config, dict):
            # Guard before list(config.keys()) below blows up on e.g. a string.
            return jsonify({
                "success": False,
                "message": "config must be an object"
            }), 400
        logger.info(f"Deploy request: app_id={app_id}, config_keys={list(config.keys())}")
        result = oci_manager.deploy_app(app_id, config, installed_by="web")
        logger.info(f"Deploy result: {result}")
        status_code = 200 if result.get("success") else 400
        return jsonify(result), status_code
    except Exception as e:
        logger.error(f"Failed to deploy app: {e}")
        return jsonify({
            "success": False,
            "message": str(e)
        }), 500
# =================================================================
# Lifecycle Action Endpoints
# =================================================================
def _lifecycle_action(app_id: str, action, verb: str):
    """Run a start/stop/restart manager call and translate it to an HTTP response.

    Args:
        app_id: The app identifier.
        action: The oci_manager callable taking app_id and returning a result dict.
        verb: Human-readable action name used in the error log message.
    """
    try:
        result = action(app_id)
        status_code = 200 if result.get("success") else 400
        return jsonify(result), status_code
    except Exception as e:
        logger.error(f"Failed to {verb} app: {e}")
        return jsonify({
            "success": False,
            "message": str(e)
        }), 500


@oci_bp.route("/installed/<app_id>/start", methods=["POST"])
@require_auth
def start_app(app_id: str):
    """Start an installed app's container."""
    return _lifecycle_action(app_id, oci_manager.start_app, "start")


@oci_bp.route("/installed/<app_id>/stop", methods=["POST"])
@require_auth
def stop_app(app_id: str):
    """Stop an installed app's container."""
    return _lifecycle_action(app_id, oci_manager.stop_app, "stop")


@oci_bp.route("/installed/<app_id>/restart", methods=["POST"])
@require_auth
def restart_app(app_id: str):
    """Restart an installed app's container."""
    return _lifecycle_action(app_id, oci_manager.restart_app, "restart")
@oci_bp.route("/installed/<app_id>", methods=["DELETE"])
@require_auth
def remove_app(app_id: str):
    """
    Remove an installed app.

    Query params:
        remove_data: If true, also remove persistent data (default false)
    """
    try:
        wipe_data = request.args.get("remove_data", "false").lower() == "true"
        result = oci_manager.remove_app(app_id, remove_data=wipe_data)
        return jsonify(result), (200 if result.get("success") else 400)
    except Exception as exc:
        logger.error(f"Failed to remove app: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
# =================================================================
# Configuration Update Endpoint
# =================================================================
@oci_bp.route("/installed/<app_id>/config", methods=["PUT"])
@require_auth
def update_app_config(app_id: str):
    """
    Update an app's configuration and recreate the container.

    Body:
        {
            "config": { ... new config values ... }
        }
    """
    try:
        # silent=True: malformed/missing JSON returns None so the client gets
        # the intended 400 below rather than a generic 500 from the except.
        data = request.get_json(silent=True)
        if not data or "config" not in data:
            return jsonify({
                "success": False,
                "message": "config is required in request body"
            }), 400
        if not isinstance(data["config"], dict):
            return jsonify({
                "success": False,
                "message": "config must be an object"
            }), 400
        result = oci_manager.update_app_config(app_id, data["config"])
        status_code = 200 if result.get("success") else 400
        return jsonify(result), status_code
    except Exception as e:
        logger.error(f"Failed to update app config: {e}")
        return jsonify({
            "success": False,
            "message": str(e)
        }), 500
# =================================================================
# Utility Endpoints
# =================================================================
@oci_bp.route("/networks", methods=["GET"])
@require_auth
def get_networks():
    """
    Get available networks for VPN routing.

    Returns:
        JSON with ``networks``: detected network interfaces with their subnets.
    """
    try:
        detected = oci_manager.detect_networks()
        return jsonify({"success": True, "networks": detected})
    except Exception as exc:
        logger.error(f"Failed to detect networks: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/runtime", methods=["GET"])
@require_auth
def get_runtime():
    """
    Get container runtime information.

    Returns:
        Runtime type (podman/docker), version, and availability.
    """
    try:
        info = oci_manager.detect_runtime()
        # Spread the runtime fields alongside the success flag.
        return jsonify({"success": True, **info})
    except Exception as exc:
        logger.error(f"Failed to detect runtime: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/runtime/install-script", methods=["GET"])
@require_auth
def get_runtime_install_script():
    """
    Get the path to the runtime installation script.

    Returns:
        Script path for installing Podman.
    """
    import os
    # Candidate locations: installed share directory first, then the
    # repository-relative path used during development.
    candidates = (
        "/usr/local/share/proxmenux/scripts/oci/install_runtime.sh",
        os.path.join(os.path.dirname(__file__), "..", "..", "Scripts", "oci", "install_runtime.sh"),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return jsonify({"success": True, "script_path": os.path.abspath(candidate)})
    return jsonify({"success": False, "message": "Runtime installation script not found"}), 404
@oci_bp.route("/status/<app_id>", methods=["GET"])
@require_auth
def get_app_status(app_id: str):
    """
    Get the current status of an app's container.

    Returns:
        Container state, health, and uptime.
    """
    try:
        current = oci_manager.get_app_status(app_id)
        return jsonify({"success": True, "app_id": app_id, "status": current})
    except Exception as exc:
        logger.error(f"Failed to get app status: {exc}")
        return jsonify({"success": False, "message": str(exc)}), 500
@oci_bp.route("/installed/<app_id>/update-auth-key", methods=["POST"])
@require_auth
def update_auth_key(app_id: str):
    """
    Update the Tailscale auth key for an installed gateway.

    This is useful when the auth key expires and the gateway needs to re-authenticate.

    Body:
        {
            "auth_key": "tskey-auth-xxx"
        }

    Returns:
        Success status and message.
    """
    try:
        # silent=True: a missing/non-JSON body yields None and hits the 400
        # path below instead of raising into the except (which returns 500).
        data = request.get_json(silent=True)
        if not data or "auth_key" not in data:
            return jsonify({
                "success": False,
                "message": "auth_key is required in request body"
            }), 400
        auth_key = data["auth_key"]
        # Reject non-string values too: a JSON number here would previously
        # raise AttributeError on .startswith and surface as a 500.
        if not isinstance(auth_key, str) or not auth_key.startswith("tskey-"):
            return jsonify({
                "success": False,
                "message": "Invalid auth key format. Should start with 'tskey-'"
            }), 400
        result = oci_manager.update_auth_key(app_id, auth_key)
        status_code = 200 if result.get("success") else 400
        return jsonify(result), status_code
    except Exception as e:
        logger.error(f"Failed to update auth key: {e}")
        return jsonify({
            "success": False,
            "message": str(e)
        }), 500
+247 -26
View File
@@ -1,33 +1,198 @@
from flask import Blueprint, jsonify
from flask import Blueprint, jsonify, request
import json
import os
import re
proxmenux_bp = Blueprint('proxmenux', __name__)
# Tool descriptions mapping
TOOL_DESCRIPTIONS = {
'lvm_repair': 'LVM PV Headers Repair',
'repo_cleanup': 'Repository Cleanup',
'subscription_banner': 'Subscription Banner Removal',
'time_sync': 'Time Synchronization',
'apt_languages': 'APT Language Skip',
'journald': 'Journald Optimization',
'logrotate': 'Logrotate Optimization',
'system_limits': 'System Limits Increase',
'entropy': 'Entropy Generation (haveged)',
'memory_settings': 'Memory Settings Optimization',
'kernel_panic': 'Kernel Panic Configuration',
'apt_ipv4': 'APT IPv4 Force',
'kexec': 'kexec for quick reboots',
'network_optimization': 'Network Optimizations',
'bashrc_custom': 'Bashrc Customization',
'figurine': 'Figurine',
'fastfetch': 'Fastfetch',
'log2ram': 'Log2ram (SSD Protection)',
'amd_fixes': 'AMD CPU (Ryzen/EPYC) fixes',
'persistent_network': 'Setting persistent network interfaces'
# Tool metadata: description, function name in bash script, and version
# version: current version of the optimization function
# function: the bash function name that implements this optimization
TOOL_METADATA = {
'subscription_banner': {'name': 'Subscription Banner Removal', 'function': 'remove_subscription_banner', 'version': '1.0'},
'time_sync': {'name': 'Time Synchronization', 'function': 'configure_time_sync', 'version': '1.0'},
'apt_languages': {'name': 'APT Language Skip', 'function': 'skip_apt_languages', 'version': '1.0'},
'journald': {'name': 'Journald Optimization', 'function': 'optimize_journald', 'version': '1.1'},
'logrotate': {'name': 'Logrotate Optimization', 'function': 'optimize_logrotate', 'version': '1.1'},
'system_limits': {'name': 'System Limits Increase', 'function': 'increase_system_limits', 'version': '1.1'},
# entropy removed — modern kernels 5.6+ have built-in entropy generation, haveged no longer needed
'memory_settings': {'name': 'Memory Settings Optimization', 'function': 'optimize_memory_settings', 'version': '1.1'},
'kernel_panic': {'name': 'Kernel Panic Configuration', 'function': 'configure_kernel_panic', 'version': '1.0'},
'apt_ipv4': {'name': 'APT IPv4 Force', 'function': 'force_apt_ipv4', 'version': '1.0'},
'kexec': {'name': 'kexec for quick reboots', 'function': 'enable_kexec', 'version': '1.0'},
'network_optimization': {'name': 'Network Optimizations', 'function': 'apply_network_optimizations', 'version': '1.0'},
'bashrc_custom': {'name': 'Bashrc Customization', 'function': 'customize_bashrc', 'version': '1.0'},
'figurine': {'name': 'Figurine', 'function': 'configure_figurine', 'version': '1.0'},
'fastfetch': {'name': 'Fastfetch', 'function': 'configure_fastfetch', 'version': '1.0'},
'log2ram': {'name': 'Log2ram (SSD Protection)', 'function': 'configure_log2ram', 'version': '1.0'},
'amd_fixes': {'name': 'AMD CPU (Ryzen/EPYC) fixes', 'function': 'apply_amd_fixes', 'version': '1.0'},
'persistent_network': {'name': 'Setting persistent network interfaces', 'function': 'setup_persistent_network', 'version': '1.0'},
'vfio_iommu': {'name': 'VFIO/IOMMU Passthrough', 'function': 'enable_vfio_iommu', 'version': '1.0'},
'lvm_repair': {'name': 'LVM PV Headers Repair', 'function': 'repair_lvm_headers', 'version': '1.0'},
'repo_cleanup': {'name': 'Repository Cleanup', 'function': 'cleanup_repos', 'version': '1.0'},
# ── Legacy / Deprecated entries ──
# These optimizations were applied by previous ProxMenux versions but are
# no longer needed or have been removed from the current scripts. We still
# expose their source code for transparency with existing users.
'entropy': {'name': 'Entropy Generation (haveged)', 'function': 'configure_entropy', 'version': '1.0', 'deprecated': True},
}
# Backward-compatible description mapping (used by get_installed_tools)
TOOL_DESCRIPTIONS = {k: v['name'] for k, v in TOOL_METADATA.items()}
# Source code preserved for deprecated/removed optimization functions.
# When a function is removed from the active bash scripts (because it's
# no longer needed, e.g. obsoleted by kernel improvements), keep its code
# here so users who installed it in the past can still inspect what ran.
DEPRECATED_SOURCES = {
'configure_entropy': {
'script': 'customizable_post_install.sh (legacy)',
'source': '''# ─────────────────────────────────────────────────────────────────
# NOTE: This optimization has been REMOVED from current ProxMenux versions.
# Modern Linux kernels (5.6+, shipped with Proxmox VE 7.x and 8.x) include
# built-in entropy generation via the Jitter RNG and CRNG, making haveged
# unnecessary. The function below is preserved here for transparency so
# users who applied it in the past can see exactly what was installed.
# New ProxMenux installations no longer include this optimization.
# ─────────────────────────────────────────────────────────────────
configure_entropy() {
msg_info2 "$(translate "Configuring entropy generation to prevent slowdowns...")"
# Install haveged
msg_info "$(translate "Installing haveged...")"
/usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' install haveged > /dev/null 2>&1
msg_ok "$(translate "haveged installed successfully")"
# Configure haveged
msg_info "$(translate "Configuring haveged...")"
cat <<EOF > /etc/default/haveged
# -w sets low entropy watermark (in bits)
DAEMON_ARGS="-w 1024"
EOF
# Reload systemd daemon
systemctl daemon-reload > /dev/null 2>&1
# Enable haveged service
systemctl enable haveged > /dev/null 2>&1
msg_ok "$(translate "haveged service enabled successfully")"
register_tool "entropy" true
msg_success "$(translate "Entropy generation configuration completed")"
}
''',
},
}
# Scripts to search for function source code (in order of preference)
_SCRIPT_PATHS = [
'/usr/local/share/proxmenux/scripts/post_install/customizable_post_install.sh',
'/usr/local/share/proxmenux/scripts/post_install/auto_post_install.sh',
]
def _extract_bash_function(function_name: str) -> dict:
    """Extract a bash function's source code.
    Checks DEPRECATED_SOURCES first (for functions removed from active scripts),
    then searches the live bash scripts for `function_name() {` and captures
    everything until the matching closing `}`, respecting brace nesting.
    Returns {'source': str, 'script': str, 'line_start': int, 'line_end': int}
    or {'source': '', 'error': '...'} on failure.
    """
    # Check preserved deprecated source code first
    if function_name in DEPRECATED_SOURCES:
        entry = DEPRECATED_SOURCES[function_name]
        source = entry['source']
        return {
            'source': source,
            # 'script' here is a descriptive label, not a real on-disk path.
            'script': entry['script'],
            'line_start': 1,
            # Number of lines in the preserved snippet (trailing newline counts
            # as one extra empty line).
            'line_end': len(source.split('\n')),
        }
    for script_path in _SCRIPT_PATHS:
        if not os.path.isfile(script_path):
            continue
        try:
            with open(script_path, 'r') as f:
                lines = f.readlines()
            # Find function start: "function_name() {" or "function_name () {"
            # NOTE(review): only matches the `name() {` form with the opening
            # brace on the same line; the `function name {` style or a brace on
            # the next line would be missed — assumed not used in these scripts.
            pattern = re.compile(rf'^{re.escape(function_name)}\s*\(\)\s*\{{')
            start_idx = None
            for i, line in enumerate(lines):
                if pattern.match(line):
                    start_idx = i
                    break
            if start_idx is None:
                continue  # Try next script
            # Capture until the closing } at indent level 0
            # NOTE(review): braces are counted textually, so `{`/`}` inside
            # quoted strings, heredocs, or `${var}` expansions also move the
            # depth; they cancel out when balanced — assumed true in practice.
            brace_depth = 0
            end_idx = start_idx
            for i in range(start_idx, len(lines)):
                brace_depth += lines[i].count('{') - lines[i].count('}')
                if brace_depth <= 0:
                    end_idx = i
                    break
            source = ''.join(lines[start_idx:end_idx + 1])
            script_name = os.path.basename(script_path)
            return {
                'source': source,
                'script': script_name,
                # 1-based line numbers for display in the UI.
                'line_start': start_idx + 1,
                'line_end': end_idx + 1,
            }
        except Exception:
            # Unreadable/odd script: fall through and try the next candidate.
            continue
    return {'source': '', 'error': 'Function not found in available scripts'}
@proxmenux_bp.route('/api/proxmenux/update-status', methods=['GET'])
def get_update_status():
    """Get ProxMenux update availability status from config.json"""
    config_path = '/usr/local/share/proxmenux/config.json'
    # Default payload used when the config file or the key is absent.
    no_updates = {
        'stable': False,
        'stable_version': '',
        'beta': False,
        'beta_version': ''
    }
    try:
        if not os.path.exists(config_path):
            return jsonify({'success': True, 'update_available': no_updates})
        with open(config_path, 'r') as f:
            config = json.load(f)
        return jsonify({
            'success': True,
            'update_available': config.get('update_available', no_updates)
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@proxmenux_bp.route('/api/proxmenux/installed-tools', methods=['GET'])
def get_installed_tools():
"""Get list of installed ProxMenux tools/optimizations"""
@@ -44,14 +209,18 @@ def get_installed_tools():
with open(installed_tools_path, 'r') as f:
data = json.load(f)
# Convert to list format with descriptions
# Convert to list format with descriptions and version
tools = []
for tool_key, enabled in data.items():
if enabled: # Only include enabled tools
meta = TOOL_METADATA.get(tool_key, {})
tools.append({
'key': tool_key,
'name': TOOL_DESCRIPTIONS.get(tool_key, tool_key.replace('_', ' ').title()),
'enabled': enabled
'name': meta.get('name', tool_key.replace('_', ' ').title()),
'enabled': enabled,
'version': meta.get('version', '1.0'),
'has_source': bool(meta.get('function')),
'deprecated': bool(meta.get('deprecated', False)),
})
# Sort alphabetically by name
@@ -73,3 +242,55 @@ def get_installed_tools():
'success': False,
'error': str(e)
}), 500
@proxmenux_bp.route('/api/proxmenux/tool-source/<tool_key>', methods=['GET'])
def get_tool_source(tool_key):
    """Get the bash source code of a specific optimization function.
    Returns the function body extracted from the post-install scripts,
    so users can see exactly what code was executed on their server.
    """
    try:
        meta = TOOL_METADATA.get(tool_key)
        if meta is None:
            return jsonify({'success': False, 'error': f'Unknown tool: {tool_key}'}), 404
        func_name = meta.get('function')
        if not func_name:
            return jsonify({'success': False, 'error': f'No function mapping for {tool_key}'}), 404
        extraction = _extract_bash_function(func_name)
        if not extraction.get('source'):
            return jsonify({
                'success': False,
                'error': extraction.get('error', 'Source code not available'),
                'tool': tool_key,
                'function': func_name,
            }), 404
        return jsonify({
            'success': True,
            'tool': tool_key,
            'name': meta['name'],
            'version': meta.get('version', '1.0'),
            'deprecated': bool(meta.get('deprecated', False)),
            'function': func_name,
            'source': extraction['source'],
            'script': extraction['script'],
            'line_start': extraction['line_start'],
            'line_end': extraction['line_end'],
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
+352
View File
@@ -0,0 +1,352 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ProxMenux Security Routes
Flask blueprint for firewall management and security tool detection.
"""
from flask import Blueprint, jsonify, request
security_bp = Blueprint('security', __name__)
# security_manager is optional: if the module cannot be imported, every
# endpoint degrades to a 500 "Security manager not available" response
# instead of crashing the whole application at import time.
try:
    import security_manager
except ImportError:
    security_manager = None
# -------------------------------------------------------------------
# Proxmox Firewall
# -------------------------------------------------------------------
@security_bp.route('/api/security/firewall/status', methods=['GET'])
def firewall_status():
    """Get Proxmox firewall status, rules, and port 8008 status"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # Spread the manager's status fields alongside the success flag.
        return jsonify({"success": True, **security_manager.get_firewall_status()})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
def _toggle_firewall(enable: bool):
    """Shared implementation for the firewall enable/disable endpoints.

    Args:
        enable: True to enable the firewall, False to disable it.
    """
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # silent=True: a missing or non-JSON body becomes None (then {})
        # instead of raising BadRequest, which the broad except would
        # otherwise report as a 500. The `or {}` default is the intent.
        data = request.get_json(silent=True) or {}
        level = data.get("level", "host")
        if enable:
            success, message = security_manager.enable_firewall(level)
        else:
            success, message = security_manager.disable_firewall(level)
        return jsonify({"success": success, "message": message})
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500


@security_bp.route('/api/security/firewall/enable', methods=['POST'])
def firewall_enable():
    """Enable Proxmox firewall at host or cluster level"""
    return _toggle_firewall(True)


@security_bp.route('/api/security/firewall/disable', methods=['POST'])
def firewall_disable():
    """Disable Proxmox firewall at host or cluster level"""
    return _toggle_firewall(False)
@security_bp.route('/api/security/firewall/rules', methods=['POST'])
def firewall_add_rule():
    """Add a custom firewall rule"""
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # silent=True: non-JSON bodies yield None (then {}) instead of raising
        # BadRequest/UnsupportedMediaType, which previously surfaced as a 500.
        data = request.get_json(silent=True) or {}
        success, message = security_manager.add_firewall_rule(
            direction=data.get("direction", "IN"),
            action=data.get("action", "ACCEPT"),
            protocol=data.get("protocol", "tcp"),
            dport=data.get("dport", ""),
            sport=data.get("sport", ""),
            source=data.get("source", ""),
            dest=data.get("dest", ""),
            iface=data.get("iface", ""),
            comment=data.get("comment", ""),
            level=data.get("level", "host"),
        )
        if success:
            return jsonify({"success": True, "message": message})
        else:
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@security_bp.route('/api/security/firewall/rules', methods=['DELETE'])
def firewall_delete_rule():
    """Delete a firewall rule by index"""
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # silent=True: non-JSON bodies become {} and hit the 400 path below
        # instead of raising into the except (which returns 500).
        data = request.get_json(silent=True) or {}
        rule_index = data.get("rule_index")
        level = data.get("level", "host")
        if rule_index is None:
            return jsonify({"success": False, "message": "rule_index is required"}), 400
        try:
            rule_index = int(rule_index)
        except (TypeError, ValueError):
            # Bad client input is a 400, not an internal error.
            return jsonify({"success": False, "message": "rule_index must be an integer"}), 400
        success, message = security_manager.delete_firewall_rule(rule_index, level)
        if success:
            return jsonify({"success": True, "message": message})
        else:
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@security_bp.route('/api/security/firewall/rules/edit', methods=['PUT'])
def firewall_edit_rule():
    """Edit an existing firewall rule (delete old + insert new at same position)"""
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # silent=True: non-JSON bodies become {} and hit the 400 path below
        # instead of raising into the except (which returns 500).
        data = request.get_json(silent=True) or {}
        rule_index = data.get("rule_index")
        level = data.get("level", "host")
        new_rule = data.get("new_rule", {})
        if rule_index is None:
            return jsonify({"success": False, "message": "rule_index is required"}), 400
        try:
            rule_index = int(rule_index)
        except (TypeError, ValueError):
            # Bad client input is a 400, not an internal error.
            return jsonify({"success": False, "message": "rule_index must be an integer"}), 400
        success, message = security_manager.edit_firewall_rule(
            rule_index=rule_index,
            level=level,
            direction=new_rule.get("direction", "IN"),
            action=new_rule.get("action", "ACCEPT"),
            protocol=new_rule.get("protocol", "tcp"),
            dport=new_rule.get("dport", ""),
            sport=new_rule.get("sport", ""),
            source=new_rule.get("source", ""),
            iface=new_rule.get("iface", ""),
            comment=new_rule.get("comment", ""),
        )
        if success:
            return jsonify({"success": True, "message": message})
        else:
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@security_bp.route('/api/security/firewall/monitor-port', methods=['POST'])
def firewall_add_monitor_port():
    """Add firewall rule to allow port 8008 for ProxMenux Monitor"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        ok, msg = security_manager.add_monitor_port_rule()
        return jsonify({"success": ok, "message": msg})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500


@security_bp.route('/api/security/firewall/monitor-port', methods=['DELETE'])
def firewall_remove_monitor_port():
    """Remove the ProxMenux Monitor port 8008 rule"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        ok, msg = security_manager.remove_monitor_port_rule()
        return jsonify({"success": ok, "message": msg})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
# -------------------------------------------------------------------
# Fail2Ban Detailed Management
# -------------------------------------------------------------------
@security_bp.route('/api/security/fail2ban/details', methods=['GET'])
def fail2ban_details():
    """Get detailed Fail2Ban info: per-jail banned IPs, stats, config"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # Spread the detail fields alongside the success flag.
        return jsonify({"success": True, **security_manager.get_fail2ban_details()})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
@security_bp.route('/api/security/fail2ban/unban', methods=['POST'])
def fail2ban_unban():
    """Unban a specific IP from a Fail2Ban jail"""
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # silent=True: non-JSON bodies become {} and fail validation below
        # with a 400 instead of raising into the except (which returns 500).
        data = request.get_json(silent=True) or {}
        jail = data.get("jail", "")
        ip = data.get("ip", "")
        if not jail or not ip:
            # Validate before calling the manager with empty arguments.
            return jsonify({"success": False, "message": "jail and ip are required"}), 400
        success, message = security_manager.unban_ip(jail, ip)
        if success:
            return jsonify({"success": True, "message": message})
        else:
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@security_bp.route('/api/security/fail2ban/jail/config', methods=['PUT'])
def fail2ban_jail_config():
    """Update jail configuration (maxretry, bantime, findtime)"""
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # silent=True: non-JSON bodies become {} and fail the jail-name check
        # with a 400 instead of raising into the except (which returns 500).
        data = request.get_json(silent=True) or {}
        jail = data.get("jail", "")
        if not jail:
            return jsonify({"success": False, "message": "Jail name is required"}), 400
        success, message = security_manager.update_jail_config(
            jail,
            maxretry=data.get("maxretry"),
            bantime=data.get("bantime"),
            findtime=data.get("findtime"),
        )
        if success:
            return jsonify({"success": True, "message": message})
        else:
            return jsonify({"success": False, "message": message}), 400
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
@security_bp.route('/api/security/fail2ban/apply-jails', methods=['POST'])
def fail2ban_apply_jails():
    """Apply missing Fail2Ban jails (proxmox, proxmenux)"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        ok, msg, applied = security_manager.apply_missing_jails()
        return jsonify({"success": ok, "message": msg, "applied": applied})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500


@security_bp.route('/api/security/fail2ban/activity', methods=['GET'])
def fail2ban_activity():
    """Get recent Fail2Ban log activity"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        recent = security_manager.get_fail2ban_recent_activity()
        return jsonify({"success": True, "events": recent})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
# -------------------------------------------------------------------
# Lynis Audit
# -------------------------------------------------------------------
@security_bp.route('/api/security/lynis/run', methods=['POST'])
def lynis_run_audit():
    """Start a Lynis audit (runs in background)"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        ok, msg = security_manager.run_lynis_audit()
        return jsonify({"success": ok, "message": msg})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500


@security_bp.route('/api/security/lynis/status', methods=['GET'])
def lynis_audit_status():
    """Get Lynis audit running status"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        return jsonify({"success": True, **security_manager.get_lynis_audit_status()})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500


@security_bp.route('/api/security/lynis/report', methods=['GET'])
def lynis_report():
    """Get parsed Lynis audit report"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        parsed = security_manager.parse_lynis_report()
        if not parsed:
            return jsonify({"success": False, "message": "No report available. Run an audit first."})
        return jsonify({"success": True, "report": parsed})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
@security_bp.route('/api/security/lynis/report', methods=['DELETE'])
def lynis_report_delete():
    """Delete Lynis audit report files"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        import os
        report_files = (
            "/var/log/lynis-report.dat",
            "/var/log/lynis.log",
            "/var/log/lynis-output.log",
        )
        removed = []
        for path in report_files:
            if os.path.isfile(path):
                os.remove(path)
                removed.append(path)
        if not removed:
            return jsonify({"success": False, "message": "No report files found to delete"})
        return jsonify({"success": True, "message": f"Deleted: {', '.join(removed)}"})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
# -------------------------------------------------------------------
# Security Tools Uninstall
# -------------------------------------------------------------------
@security_bp.route('/api/security/fail2ban/uninstall', methods=['POST'])
def fail2ban_uninstall():
    """Uninstall Fail2Ban and clean up configuration"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        ok, msg = security_manager.uninstall_fail2ban()
        return jsonify({"success": ok, "message": msg})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500


@security_bp.route('/api/security/lynis/uninstall', methods=['POST'])
def lynis_uninstall():
    """Uninstall Lynis and clean up files"""
    if security_manager is None:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        ok, msg = security_manager.uninstall_lynis()
        return jsonify({"success": ok, "message": msg})
    except Exception as exc:
        return jsonify({"success": False, "message": str(exc)}), 500
# -------------------------------------------------------------------
# Security Tools Detection
# -------------------------------------------------------------------
@security_bp.route('/api/security/tools', methods=['GET'])
def security_tools():
    """Detect installed security tools (Fail2Ban, Lynis, etc.)"""
    if not security_manager:
        return jsonify({"success": False, "message": "Security manager not available"}), 500
    try:
        # Manager probes the host for known tools and reports their state.
        detected = security_manager.detect_security_tools()
        return jsonify({"success": True, "tools": detected})
    except Exception as e:
        return jsonify({"success": False, "message": str(e)}), 500
File diff suppressed because it is too large Load Diff
+17 -5
View File
@@ -181,11 +181,23 @@ def terminal_websocket(ws):
except Exception:
msg = None
if isinstance(msg, dict) and msg.get('type') == 'resize':
cols = int(msg.get('cols', 120))
rows = int(msg.get('rows', 30))
set_winsize(master_fd, rows, cols)
handled = True
if isinstance(msg, dict):
msg_type = msg.get('type')
# Handle ping messages (heartbeat to keep connection alive)
if msg_type == 'ping':
try:
ws.send(json.dumps({'type': 'pong'}))
except:
pass
handled = True
# Handle resize messages
elif msg_type == 'resize':
cols = int(msg.get('cols', 120))
rows = int(msg.get('rows', 30))
set_winsize(master_fd, rows, cols)
handled = True
if handled:
# Control message processed, do not send to bash
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
+929
View File
@@ -0,0 +1,929 @@
"""
ProxMenux Notification Channels
Provides transport adapters for Telegram, Gotify, Discord, and Email (SMTP/sendmail).
Each channel implements send() and test() with:
- Retry with exponential backoff (3 attempts)
- Request timeout of 10s
- Rate limiting (max 30 msg/min per channel)
Author: MacRimi
"""
import json
import time
import urllib.request
import urllib.error
import urllib.parse
from abc import ABC, abstractmethod
from collections import deque
from typing import Tuple, Optional, Dict, Any
# ─── Rate Limiter ────────────────────────────────────────────────
class RateLimiter:
    """Sliding-window rate limiter: permits at most ``max_calls`` events
    within a rolling ``window_seconds`` window.
    """

    def __init__(self, max_calls: int = 30, window_seconds: int = 60):
        self.max_calls = max_calls
        self.window = window_seconds
        # Monotonic timestamps of accepted calls, oldest first.
        self._timestamps: deque = deque()

    def _evict_expired(self, now: float) -> None:
        # Drop timestamps that have aged out of the rolling window.
        while self._timestamps and now - self._timestamps[0] > self.window:
            self._timestamps.popleft()

    def allow(self) -> bool:
        """Record one event; return True if it fits within the limit."""
        now = time.monotonic()
        self._evict_expired(now)
        if len(self._timestamps) < self.max_calls:
            self._timestamps.append(now)
            return True
        return False

    def wait_time(self) -> float:
        """Seconds until the oldest recorded event ages out (0.0 if idle)."""
        if not self._timestamps:
            return 0.0
        elapsed = time.monotonic() - self._timestamps[0]
        return max(0.0, self.window - elapsed)
# ─── Base Channel ────────────────────────────────────────────────
class NotificationChannel(ABC):
    """Abstract base for all notification channels.

    Concrete channels implement send/test/validate_config and reuse the
    shared HTTP, retry, and rate-limiting machinery defined here.
    """

    MAX_RETRIES = 3
    RETRY_DELAYS = [2, 4, 8]  # exponential backoff seconds
    REQUEST_TIMEOUT = 10

    def __init__(self):
        # Per-channel throttle: at most 30 messages per rolling minute.
        self._rate_limiter = RateLimiter(max_calls=30, window_seconds=60)

    @abstractmethod
    def send(self, title: str, message: str, severity: str = 'INFO',
             data: Optional[Dict] = None) -> Dict[str, Any]:
        """Send a notification. Returns {success, error, channel}."""
        pass

    @abstractmethod
    def test(self) -> Tuple[bool, str]:
        """Send a test message. Returns (success, error_message)."""
        pass

    @abstractmethod
    def validate_config(self) -> Tuple[bool, str]:
        """Check if config is valid without sending. Returns (valid, error)."""
        pass

    def _http_request(self, url: str, data: bytes, headers: Dict[str, str],
                      method: str = 'POST') -> Tuple[int, str]:
        """Execute an HTTP request with timeout. Returns (status_code, body).

        Network/protocol failures are mapped to status 0 plus an error string
        rather than raised, so callers can treat every outcome uniformly.
        """
        # Ensure a User-Agent is present to avoid Cloudflare 1010 errors.
        headers.setdefault('User-Agent', 'ProxMenux-Monitor/1.1')
        request = urllib.request.Request(url, data=data, headers=headers, method=method)
        try:
            with urllib.request.urlopen(request, timeout=self.REQUEST_TIMEOUT) as resp:
                return resp.status, resp.read().decode('utf-8', errors='replace')
        except urllib.error.HTTPError as e:
            detail = e.read().decode('utf-8', errors='replace') if e.fp else str(e)
            return e.code, detail
        except urllib.error.URLError as e:
            return 0, str(e.reason)
        except Exception as e:
            return 0, str(e)

    def _send_with_retry(self, send_fn) -> Dict[str, Any]:
        """Wrap a send callable with rate limiting and retry/backoff logic."""
        if not self._rate_limiter.allow():
            pause = self._rate_limiter.wait_time()
            return {
                'success': False,
                'error': f'Rate limited. Retry in {pause:.0f}s',
                'rate_limited': True,
            }
        failure = ''
        for attempt in range(self.MAX_RETRIES):
            try:
                status, body = send_fn()
                if 200 <= status < 300:
                    return {'success': True, 'error': None}
                failure = f'HTTP {status}: {body[:200]}'
            except Exception as e:
                failure = str(e)
            # Sleep only between attempts, never after the final one.
            if attempt < self.MAX_RETRIES - 1:
                time.sleep(self.RETRY_DELAYS[attempt])
        return {'success': False, 'error': failure}
# ─── Telegram ────────────────────────────────────────────────────
class TelegramChannel(NotificationChannel):
    """Telegram Bot API channel using HTML parse mode.

    Messages longer than Telegram's 4096-character limit are split at
    newline boundaries and sent as consecutive chunks.
    """

    API_BASE = 'https://api.telegram.org/bot{token}/sendMessage'
    API_PHOTO = 'https://api.telegram.org/bot{token}/sendPhoto'
    MAX_LENGTH = 4096  # Telegram hard limit per sendMessage call

    SEVERITY_ICONS = {
        'CRITICAL': '\U0001F534',  # red circle
        'WARNING': '\U0001F7E1',   # yellow circle
        'INFO': '\U0001F535',      # blue circle
        'OK': '\U0001F7E2',        # green circle
        'UNKNOWN': '\u26AA',       # white circle
    }

    def __init__(self, bot_token: str, chat_id: str, topic_id: str = ''):
        """Store normalized credentials.

        Args:
            bot_token: Bot API token; a leading 'bot' prefix is stripped
                because API_BASE already inserts it.
            chat_id: Target chat (user, group, or channel id).
            topic_id: Optional topic thread id for supergroups.
        """
        super().__init__()
        token = bot_token.strip()
        # Strip 'bot' prefix if user included it (API_BASE already adds it)
        if token.lower().startswith('bot') and ':' in token[3:]:
            token = token[3:]
        self.bot_token = token
        self.chat_id = chat_id.strip()
        # Topic ID for supergroups with topics enabled (message_thread_id)
        self.topic_id = topic_id.strip() if topic_id else ''

    def validate_config(self) -> Tuple[bool, str]:
        """Static credential checks; performs no network traffic."""
        if not self.bot_token:
            return False, 'Bot token is required'
        if not self.chat_id:
            return False, 'Chat ID is required'
        if ':' not in self.bot_token:
            return False, 'Invalid bot token format (expected BOT_ID:TOKEN)'
        return True, ''

    def send(self, title: str, message: str, severity: str = 'INFO',
             data: Optional[Dict] = None) -> Dict[str, Any]:
        """Send an HTML-formatted message, splitting oversized payloads.

        Returns {success, error, channel}; stops at the first failed chunk.
        """
        icon = self.SEVERITY_ICONS.get(severity, self.SEVERITY_ICONS['INFO'])
        html_msg = f"<b>{icon} {self._escape_html(title)}</b>\n\n{self._escape_html(message)}"
        # Split long messages
        chunks = self._split_message(html_msg)
        result = {'success': True, 'error': None, 'channel': 'telegram'}
        for chunk in chunks:
            # Default arg binds the current chunk (avoids late-binding bug).
            res = self._send_with_retry(lambda c=chunk: self._post_message(c))
            if not res['success']:
                result = {**res, 'channel': 'telegram'}
                break
        return result

    def send_photo(self, photo_url: str, caption: str = '') -> Dict[str, Any]:
        """Send a photo to Telegram chat."""
        url = self.API_PHOTO.format(token=self.bot_token)
        payload = {
            'chat_id': self.chat_id,
            'photo': photo_url,
        }
        # Add topic ID for supergroups with topics enabled
        if self.topic_id:
            try:
                payload['message_thread_id'] = int(self.topic_id)
            except ValueError:
                pass
        if caption:
            payload['caption'] = caption[:1024]  # Telegram caption limit
            payload['parse_mode'] = 'HTML'
        body = json.dumps(payload).encode()
        headers = {'Content-Type': 'application/json'}
        result = self._send_with_retry(
            lambda: self._http_request(url, body, headers)
        )
        result['channel'] = 'telegram'
        return result

    def test(self) -> Tuple[bool, str]:
        """Validate config, then send a canned test message."""
        valid, err = self.validate_config()
        if not valid:
            return False, err
        result = self.send(
            'ProxMenux Test',
            'Notification service is working correctly.\nThis is a test message from ProxMenux Monitor.',
            'INFO'
        )
        # FIX: on success the 'error' key exists with value None, so
        # result.get('error', '') returned None and broke the declared
        # Tuple[bool, str] contract. Coerce None -> ''.
        return result['success'], result.get('error') or ''

    def _post_message(self, text: str) -> Tuple[int, str]:
        """POST one message chunk to the sendMessage endpoint."""
        url = self.API_BASE.format(token=self.bot_token)
        payload_dict = {
            'chat_id': self.chat_id,
            'text': text,
            'parse_mode': 'HTML',
            'disable_web_page_preview': True,
        }
        # Add topic ID for supergroups with topics enabled
        if self.topic_id:
            try:
                payload_dict['message_thread_id'] = int(self.topic_id)
            except ValueError:
                pass  # Invalid topic_id, skip
        payload = json.dumps(payload_dict).encode('utf-8')
        return self._http_request(url, payload, {'Content-Type': 'application/json'})

    def _split_message(self, text: str) -> list:
        """Split text into <=MAX_LENGTH chunks, preferring newline breaks."""
        if len(text) <= self.MAX_LENGTH:
            return [text]
        chunks = []
        while text:
            if len(text) <= self.MAX_LENGTH:
                chunks.append(text)
                break
            split_at = text.rfind('\n', 0, self.MAX_LENGTH)
            if split_at == -1:
                # No newline in range: hard-cut at the limit.
                split_at = self.MAX_LENGTH
            chunks.append(text[:split_at])
            text = text[split_at:].lstrip('\n')
        return chunks

    @staticmethod
    def _escape_html(text: str) -> str:
        """Escape the three characters Telegram's HTML parse mode reserves."""
        return (text
                .replace('&', '&amp;')
                .replace('<', '&lt;')
                .replace('>', '&gt;'))
# ─── Gotify ──────────────────────────────────────────────────────
class GotifyChannel(NotificationChannel):
    """Gotify push notification channel with priority mapping."""

    # Map ProxMenux severities onto Gotify priorities (higher = louder).
    PRIORITY_MAP = {
        'OK': 1,
        'INFO': 2,
        'UNKNOWN': 3,
        'WARNING': 5,
        'CRITICAL': 10,
    }

    def __init__(self, server_url: str, app_token: str):
        """Normalize the server URL and token.

        Args:
            server_url: Base URL of the Gotify server.
            app_token: Application token used as the ?token= query param.
        """
        super().__init__()
        # FIX: strip surrounding whitespace BEFORE removing trailing slashes;
        # the original rstrip('/').strip() order left the slash in place for
        # inputs like "http://host/ ", producing a double-slash message URL.
        self.server_url = server_url.strip().rstrip('/')
        self.app_token = app_token.strip()

    def validate_config(self) -> Tuple[bool, str]:
        """Static config checks; performs no network traffic."""
        if not self.server_url:
            return False, 'Server URL is required'
        if not self.app_token:
            return False, 'Application token is required'
        if not self.server_url.startswith(('http://', 'https://')):
            return False, 'Server URL must start with http:// or https://'
        return True, ''

    def send(self, title: str, message: str, severity: str = 'INFO',
             data: Optional[Dict] = None) -> Dict[str, Any]:
        """Send one message; severity is mapped to a Gotify priority."""
        priority = self.PRIORITY_MAP.get(severity, 2)
        result = self._send_with_retry(
            lambda: self._post_message(title, message, priority)
        )
        result['channel'] = 'gotify'
        return result

    def test(self) -> Tuple[bool, str]:
        """Validate config, then send a canned test message."""
        valid, err = self.validate_config()
        if not valid:
            return False, err
        result = self.send(
            'ProxMenux Test',
            'Notification service is working correctly.\nThis is a test message from ProxMenux Monitor.',
            'INFO'
        )
        # FIX: coerce None -> '' so the Tuple[bool, str] contract holds on success.
        return result['success'], result.get('error') or ''

    def _post_message(self, title: str, message: str, priority: int) -> Tuple[int, str]:
        """POST one message to the Gotify /message endpoint."""
        url = f"{self.server_url}/message?token={self.app_token}"
        payload = json.dumps({
            'title': title,
            'message': message,
            'priority': priority,
            'extras': {
                # Render message body as markdown in Gotify clients.
                'client::display': {'contentType': 'text/markdown'}
            }
        }).encode('utf-8')
        return self._http_request(url, payload, {'Content-Type': 'application/json'})
# ─── Discord ─────────────────────────────────────────────────────
class DiscordChannel(NotificationChannel):
    """Discord webhook channel with color-coded embeds."""

    MAX_EMBED_DESC = 2048  # Discord embed description limit

    SEVERITY_COLORS = {
        'CRITICAL': 0xED4245,  # red
        'WARNING': 0xFEE75C,   # yellow
        'INFO': 0x5865F2,      # blurple
        'OK': 0x57F287,        # green
        'UNKNOWN': 0x99AAB5,   # grey
    }

    def __init__(self, webhook_url: str):
        super().__init__()
        self.webhook_url = webhook_url.strip()

    def validate_config(self) -> Tuple[bool, str]:
        """Static webhook URL checks; performs no network traffic."""
        if not self.webhook_url:
            return False, 'Webhook URL is required'
        if 'discord.com/api/webhooks/' not in self.webhook_url:
            return False, 'Invalid Discord webhook URL'
        return True, ''

    def send(self, title: str, message: str, severity: str = 'INFO',
             data: Optional[Dict] = None) -> Dict[str, Any]:
        """Post one color-coded embed; returns {success, error, channel}."""
        color = self.SEVERITY_COLORS.get(severity, 0x5865F2)
        # Unconditional slice is equivalent to the length-guarded truncation.
        desc = message[:self.MAX_EMBED_DESC]
        embed = {
            'title': title,
            'description': desc,
            'color': color,
            'footer': {'text': 'ProxMenux Monitor'},
            'timestamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        }
        # Use structured fields from render_template if available
        rendered_fields = (data or {}).get('_rendered_fields', [])
        if rendered_fields:
            embed['fields'] = [
                {'name': name, 'value': val[:1024], 'inline': True}
                for name, val in rendered_fields[:25]  # Discord limit: 25 fields
            ]
        elif data:
            fields = []
            if data.get('category'):
                fields.append({'name': 'Category', 'value': data['category'], 'inline': True})
            if data.get('hostname'):
                fields.append({'name': 'Host', 'value': data['hostname'], 'inline': True})
            if data.get('severity'):
                fields.append({'name': 'Severity', 'value': data['severity'], 'inline': True})
            if fields:
                embed['fields'] = fields
        result = self._send_with_retry(
            lambda: self._post_webhook(embed)
        )
        result['channel'] = 'discord'
        return result

    def test(self) -> Tuple[bool, str]:
        """Validate config, then send a canned test message."""
        valid, err = self.validate_config()
        if not valid:
            return False, err
        result = self.send(
            'ProxMenux Test',
            'Notification service is working correctly.\nThis is a test message from ProxMenux Monitor.',
            'INFO'
        )
        # FIX: coerce None -> '' so the Tuple[bool, str] contract holds on success.
        return result['success'], result.get('error') or ''

    def _post_webhook(self, embed: Dict) -> Tuple[int, str]:
        """POST one embed payload to the configured webhook."""
        payload = json.dumps({
            'username': 'ProxMenux',
            'embeds': [embed]
        }).encode('utf-8')
        return self._http_request(
            self.webhook_url, payload, {'Content-Type': 'application/json'}
        )
# ─── Email Channel ──────────────────────────────────────────────
class EmailChannel(NotificationChannel):
    """Email notification channel using SMTP (smtplib) or sendmail fallback.

    Config keys:
        host, port, username, password, tls_mode (none|starttls|ssl),
        from_address, to_addresses (comma-separated), subject_prefix, timeout
    """

    def __init__(self, config: Dict[str, str]):
        super().__init__()
        self.host = config.get('host', '')
        self.port = int(config.get('port', 587) or 587)
        self.username = config.get('username', '')
        self.password = config.get('password', '')
        self.tls_mode = config.get('tls_mode', 'starttls')  # none | starttls | ssl
        self.from_address = config.get('from_address', '')
        self.to_addresses = self._parse_recipients(config.get('to_addresses', ''))
        self.subject_prefix = config.get('subject_prefix', '[ProxMenux]')
        self.timeout = int(config.get('timeout', 10) or 10)

    @staticmethod
    def _parse_recipients(raw) -> list:
        """Accept a list or comma-separated string; return trimmed addresses."""
        if isinstance(raw, list):
            return [a.strip() for a in raw if a.strip()]
        return [addr.strip() for addr in str(raw).split(',') if addr.strip()]

    def validate_config(self) -> Tuple[bool, str]:
        """Check recipients/sender and that a delivery path exists."""
        if not self.to_addresses:
            return False, 'No recipients configured'
        if not self.from_address:
            return False, 'No from address configured'
        # Must have SMTP host OR local sendmail available
        if not self.host:
            import os
            if not os.path.exists('/usr/sbin/sendmail'):
                return False, 'No SMTP host configured and /usr/sbin/sendmail not found'
        return True, ''

    def send(self, title: str, message: str, severity: str = 'INFO',
             data: Optional[Dict] = None) -> Dict[str, Any]:
        """Send via SMTP when a host is configured, else local sendmail."""
        subject = f"{self.subject_prefix} [{severity}] {title}"

        def _do_send():
            if self.host:
                return self._send_smtp(subject, message, severity, data)
            else:
                return self._send_sendmail(subject, message, severity, data)

        result = self._send_with_retry(_do_send)
        # FIX: tag the result with the channel name like every other
        # transport (telegram/gotify/discord) so dispatch code can rely on it.
        result['channel'] = 'email'
        return result

    def _send_smtp(self, subject: str, body: str, severity: str,
                   data: Optional[Dict] = None) -> Tuple[int, str]:
        """Deliver via smtplib; returns (200, 'OK') or (0, error_detail)."""
        import smtplib
        import ssl as _ssl  # hoisted so 'except _ssl.SSLError' is always bound
        from email.message import EmailMessage
        msg = EmailMessage()
        msg['Subject'] = subject
        msg['From'] = self.from_address
        msg['To'] = ', '.join(self.to_addresses)
        msg.set_content(body)
        # Add HTML alternative
        html_body = self._format_html(subject, body, severity, data)
        if html_body:
            msg.add_alternative(html_body, subtype='html')
        server = None
        try:
            if self.tls_mode == 'ssl':
                ctx = _ssl.create_default_context()
                server = smtplib.SMTP_SSL(self.host, self.port,
                                          timeout=self.timeout, context=ctx)
                server.ehlo()
            else:
                server = smtplib.SMTP(self.host, self.port, timeout=self.timeout)
                server.ehlo()
                if self.tls_mode == 'starttls':
                    ctx = _ssl.create_default_context()
                    server.starttls(context=ctx)
                    server.ehlo()  # Re-identify after TLS -- server re-announces AUTH
            if self.username and self.password:
                server.login(self.username, self.password)
            server.send_message(msg)
            server.quit()
            server = None  # signal clean shutdown to the finally block
            return 200, 'OK'
        except smtplib.SMTPAuthenticationError as e:
            return 0, f'SMTP authentication failed (check username/password or app-specific password): {e}'
        except smtplib.SMTPNotSupportedError as e:
            return 0, (f'SMTP AUTH not supported by server. '
                       f'This may mean the server requires OAuth2 or an App Password '
                       f'instead of regular credentials: {e}')
        except smtplib.SMTPConnectError as e:
            return 0, f'SMTP connection failed: {e}'
        except smtplib.SMTPException as e:
            return 0, f'SMTP error: {e}'
        except _ssl.SSLError as e:
            return 0, f'TLS/SSL error (check TLS mode and port): {e}'
        except (OSError, TimeoutError) as e:
            return 0, f'Connection error: {e}'
        finally:
            # Best-effort close if an exception interrupted the session.
            if server:
                try:
                    server.quit()
                except Exception:
                    pass

    def _send_sendmail(self, subject: str, body: str, severity: str,
                       data: Optional[Dict] = None) -> Tuple[int, str]:
        """Deliver via the local /usr/sbin/sendmail binary."""
        import os
        import subprocess
        from email.message import EmailMessage
        sendmail = '/usr/sbin/sendmail'
        if not os.path.exists(sendmail):
            return 0, 'sendmail not found at /usr/sbin/sendmail'
        msg = EmailMessage()
        msg['Subject'] = subject
        msg['From'] = self.from_address or 'proxmenux@localhost'
        msg['To'] = ', '.join(self.to_addresses)
        msg.set_content(body)
        # Add HTML alternative
        html_body = self._format_html(subject, body, severity, data)
        if html_body:
            msg.add_alternative(html_body, subtype='html')
        try:
            # -t: read recipients from headers; -oi: don't treat '.' as EOF.
            proc = subprocess.run(
                [sendmail, '-t', '-oi'],
                input=msg.as_string(), capture_output=True, text=True, timeout=30
            )
            if proc.returncode == 0:
                return 200, 'OK'
            return 0, f'sendmail failed (rc={proc.returncode}): {proc.stderr[:200]}'
        except subprocess.TimeoutExpired:
            return 0, 'sendmail timed out after 30s'
        except Exception as e:
            return 0, f'sendmail error: {e}'

    # Severity -> accent colour + label
    _SEV_STYLE = {
        'CRITICAL': {'color': '#dc2626', 'bg': '#fef2f2', 'border': '#fecaca', 'label': 'Critical'},
        'WARNING': {'color': '#d97706', 'bg': '#fffbeb', 'border': '#fde68a', 'label': 'Warning'},
        'INFO': {'color': '#2563eb', 'bg': '#eff6ff', 'border': '#bfdbfe', 'label': 'Information'},
        'OK': {'color': '#16a34a', 'bg': '#f0fdf4', 'border': '#bbf7d0', 'label': 'Resolved'},
    }
    _SEV_DEFAULT = {'color': '#6b7280', 'bg': '#f9fafb', 'border': '#e5e7eb', 'label': 'Notice'}

    # Group -> human-readable section header for the email
    _GROUP_LABELS = {
        'vm_ct': 'Virtual Machine / Container',
        'backup': 'Backup & Snapshot',
        'resources': 'System Resources',
        'storage': 'Storage',
        'network': 'Network',
        'security': 'Security',
        'cluster': 'Cluster',
        'services': 'System Services',
        'health': 'Health Monitor',
        'updates': 'System Updates',
        'other': 'System Notification',
    }

    def _format_html(self, subject: str, body: str, severity: str,
                     data: Optional[Dict] = None) -> str:
        """Build a professional HTML email with structured data sections."""
        import html as html_mod
        import time as _time
        data = data or {}
        sev = self._SEV_STYLE.get(severity, self._SEV_DEFAULT)
        # Determine group for section header
        event_type = data.get('_event_type', '')
        group = data.get('_group', 'other')
        section_label = self._GROUP_LABELS.get(group, 'System Notification')
        # Timestamp
        ts = data.get('timestamp', '') or _time.strftime('%Y-%m-%d %H:%M:%S UTC', _time.gmtime())
        # Build structured detail rows from known data fields
        detail_rows = self._build_detail_rows(data, event_type, group, html_mod)
        # Fallback: if no structured rows, render body text lines
        if not detail_rows:
            for line in body.split('\n'):
                stripped = line.strip()
                if not stripped:
                    continue
                # Try to split "Label: value" patterns
                if ':' in stripped:
                    lbl, _, val = stripped.partition(':')
                    if val.strip() and len(lbl) < 40:
                        detail_rows.append((html_mod.escape(lbl.strip()), html_mod.escape(val.strip())))
                        continue
                detail_rows.append(('', html_mod.escape(stripped)))
        # Render detail rows as HTML table
        rows_html = ''
        for label, value in detail_rows:
            if label:
                rows_html += f'''<tr>
<td style="padding:8px 12px;font-size:13px;color:#374151;font-weight:500;white-space:nowrap;vertical-align:top;border-bottom:1px solid #e5e7eb;">{label}</td>
<td style="padding:8px 12px;font-size:13px;color:#111827;border-bottom:1px solid #e5e7eb;">{value}</td>
</tr>'''
            else:
                # Full-width row (no label, just description text)
                rows_html += f'''<tr>
<td colspan="2" style="padding:8px 12px;font-size:13px;color:#1f2937;border-bottom:1px solid #e5e7eb;">{value}</td>
</tr>'''
        # Reason / details block (long text, displayed separately)
        reason = data.get('reason', '')
        reason_html = ''
        if reason and len(reason) > 80:
            reason_html = f'''
<div style="margin:16px 0 0;padding:12px 16px;border:1px solid #d1d5db;border-radius:6px;">
<p style="margin:0 0 4px;font-size:11px;font-weight:600;color:#374151;text-transform:uppercase;letter-spacing:0.05em;">Details</p>
<p style="margin:0;font-size:13px;color:#1f2937;line-height:1.6;white-space:pre-wrap;">{html_mod.escape(reason)}</p>
</div>'''
        # Clean subject for display (remove prefix if present)
        display_title = subject
        for prefix in [self.subject_prefix, '[CRITICAL]', '[WARNING]', '[INFO]', '[OK]']:
            display_title = display_title.replace(prefix, '').strip()
        return f'''<!DOCTYPE html>
<html lang="en">
<head><meta charset="utf-8"><meta name="viewport" content="width=device-width,initial-scale=1.0"></head>
<body style="margin:0;padding:0;background-color:#f3f4f6;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,'Helvetica Neue',Arial,sans-serif;">
<div style="max-width:640px;margin:24px auto;background:#ffffff;border-radius:8px;overflow:hidden;box-shadow:0 1px 3px rgba(0,0,0,0.1);border:1px solid #d1d5db;">
<!-- Header -->
<div style="padding:20px 28px;background:#f8f9fa;border-bottom:1px solid {sev['border']};">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td>
<h1 style="margin:0;font-size:18px;font-weight:700;color:#111827;letter-spacing:-0.02em;">ProxMenux Monitor</h1>
<p style="margin:4px 0 0;font-size:12px;color:#4b5563;">{html_mod.escape(section_label)} Report</p>
</td>
<td style="text-align:right;vertical-align:top;">
<span style="display:inline-block;padding:4px 12px;border-radius:4px;font-size:11px;font-weight:600;letter-spacing:0.05em;color:{sev['color']};background:{sev['bg']};border:1px solid {sev['border']};">{sev['label'].upper()}</span>
</td>
</tr>
</table>
</div>
<!-- Title bar -->
<div style="padding:16px 28px;background:{sev['bg']};border-bottom:1px solid {sev['border']};">
<h2 style="margin:0;font-size:15px;font-weight:600;color:{sev['color']};">{html_mod.escape(display_title)}</h2>
</div>
<!-- Body -->
<div style="padding:24px 28px;">
<!-- Metadata -->
<table width="100%" cellpadding="0" cellspacing="0" border="0" style="margin-bottom:16px;">
<tr>
<td style="font-size:12px;color:#4b5563;">
Host: <strong style="color:#111827;">{html_mod.escape(data.get('hostname', ''))}</strong>
</td>
<td style="font-size:12px;color:#4b5563;text-align:right;">
{html_mod.escape(ts)}
</td>
</tr>
</table>
<!-- Detail table -->
<table width="100%" cellpadding="0" cellspacing="0" border="0" style="border:1px solid #d1d5db;border-radius:6px;overflow:hidden;">
{rows_html}
</table>
{reason_html}
</div>
<!-- Footer -->
<div style="padding:14px 28px;border-top:1px solid #d1d5db;">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td style="font-size:11px;color:#4b5563;">ProxMenux Notification Service</td>
<td style="font-size:11px;color:#4b5563;text-align:right;">proxmenux.com</td>
</tr>
</table>
</div>
</div>
</body>
</html>'''

    @staticmethod
    def _build_detail_rows(data: Dict, event_type: str, group: str,
                           html_mod) -> list:
        """Build structured (label, value) rows from event data.

        Returns list of (label_html, value_html) tuples.
        An empty label means a full-width descriptive row.
        """
        esc = html_mod.escape
        rows = []

        def _add(label: str, value, fmt: str = ''):
            """Add a row if value is truthy."""
            v = str(value).strip() if value else ''
            # FIX (clarity): parentheses make the original and/or precedence
            # explicit -- skip empty values, and skip '0' unless the label is
            # 'Failures' (where zero is meaningful).
            if not v or (v == '0' and label not in ('Failures',)):
                return
            if fmt == 'severity':
                sev_colors = {
                    'CRITICAL': '#dc2626', 'WARNING': '#d97706',
                    'INFO': '#2563eb', 'OK': '#16a34a',
                }
                c = sev_colors.get(v, '#6b7280')
                rows.append((esc(label), f'<span style="color:{c};font-weight:600;">{esc(v)}</span>'))
            elif fmt == 'code':
                rows.append((esc(label), f'<code style="padding:2px 6px;background:#f3f4f6;border-radius:3px;font-family:monospace;font-size:12px;">{esc(v)}</code>'))
            elif fmt == 'bold':
                rows.append((esc(label), f'<strong>{esc(v)}</strong>'))
            else:
                rows.append((esc(label), esc(v)))

        # VM / CT events
        if group == 'vm_ct':
            _add('VM/CT ID', data.get('vmid'), 'code')
            _add('Name', data.get('vmname'), 'bold')
            _add('Action', event_type.replace('_', ' ').replace('vm ', 'VM ').replace('ct ', 'CT ').title())
            _add('Target Node', data.get('target_node'))
            _add('Reason', data.get('reason'))
        # Backup events
        elif group == 'backup':
            _add('VM/CT ID', data.get('vmid'), 'code')
            _add('Name', data.get('vmname'), 'bold')
            _add('Status', 'Failed' if 'fail' in event_type else 'Completed' if 'complete' in event_type else 'Started',
                 'severity' if 'fail' in event_type else '')
            _add('Size', data.get('size'))
            _add('Duration', data.get('duration'))
            _add('Snapshot', data.get('snapshot_name'), 'code')
            # For backup_complete/fail with parsed body, add short reason only
            reason = data.get('reason', '')
            if reason and len(reason) <= 80:
                _add('Details', reason)
        # Resources
        elif group == 'resources':
            _add('Metric', event_type.replace('_', ' ').title())
            _add('Current Value', data.get('value'), 'bold')
            _add('Threshold', data.get('threshold'))
            _add('CPU Cores', data.get('cores'))
            _add('Memory', f"{data.get('used', '')} / {data.get('total', '')}" if data.get('used') else '')
            _add('Temperature', f"{data.get('value')}C" if 'temp' in event_type else '')
        # Storage
        elif group == 'storage':
            if 'disk_space' in event_type:
                _add('Mount Point', data.get('mount'), 'code')
                _add('Usage', f"{data.get('used')}%", 'bold')
                _add('Available', data.get('available'))
            elif 'io_error' in event_type:
                _add('Device', data.get('device'), 'code')
                _add('Severity', data.get('severity', ''), 'severity')
            elif 'unavailable' in event_type:
                _add('Storage Name', data.get('storage_name'), 'bold')
                _add('Type', data.get('storage_type'), 'code')
            reason = data.get('reason', '')
            if reason and len(reason) <= 80:
                _add('Details', reason)
        # Network
        elif group == 'network':
            _add('Interface', data.get('interface'), 'code')
            _add('Latency', f"{data.get('value')}ms" if data.get('value') else '')
            _add('Threshold', f"{data.get('threshold')}ms" if data.get('threshold') else '')
            reason = data.get('reason', '')
            if reason and len(reason) <= 80:
                _add('Details', reason)
        # Security
        elif group == 'security':
            _add('Event', event_type.replace('_', ' ').title())
            _add('Source IP', data.get('source_ip'), 'code')
            _add('Username', data.get('username'), 'code')
            _add('Service', data.get('service'))
            _add('Jail', data.get('jail'), 'code')
            _add('Failures', data.get('failures'))
            _add('Change', data.get('change_details'))
        # Cluster
        elif group == 'cluster':
            _add('Event', event_type.replace('_', ' ').title())
            _add('Node', data.get('node_name'), 'bold')
            _add('Quorum', data.get('quorum'))
            _add('Nodes Affected', data.get('entity_list'))
        # Services
        elif group == 'services':
            _add('Service', data.get('service_name'), 'code')
            _add('Process', data.get('process'), 'code')
            _add('Event', event_type.replace('_', ' ').title())
            reason = data.get('reason', '')
            if reason and len(reason) <= 80:
                _add('Details', reason)
        # Health monitor
        elif group == 'health':
            _add('Category', data.get('category'), 'bold')
            _add('Severity', data.get('severity', ''), 'severity')
            if data.get('original_severity'):
                _add('Previous Severity', data.get('original_severity'), 'severity')
            _add('Duration', data.get('duration'))
            _add('Active Issues', data.get('count'))
            reason = data.get('reason', '')
            if reason and len(reason) <= 80:
                _add('Details', reason)
        # Updates
        elif group == 'updates':
            _add('Total Updates', data.get('total_count'), 'bold')
            _add('Security Updates', data.get('security_count'))
            _add('Proxmox Updates', data.get('pve_count'))
            _add('Kernel Updates', data.get('kernel_count'))
            imp = data.get('important_list', '')
            if imp and imp != 'none':
                # Render each package on its own line inside a single cell
                pkg_lines = [l.strip() for l in imp.split('\n') if l.strip()]
                if pkg_lines:
                    pkg_html = '<br>'.join(
                        f'<code style="padding:1px 5px;background:#f3f4f6;border-radius:3px;font-family:monospace;font-size:12px;">{esc(p)}</code>'
                        for p in pkg_lines
                    )
                    rows.append((esc('Important Packages'), pkg_html))
            _add('Current Version', data.get('current_version'), 'code')
            _add('New Version', data.get('new_version'), 'code')
        # Other / unknown
        else:
            reason = data.get('reason', '')
            if reason and len(reason) <= 80:
                _add('Details', reason)
        return rows

    def test(self) -> Tuple[bool, str]:
        """Validate config, then send a canned test email."""
        # FIX: pre-validate like every other channel so misconfiguration
        # yields a clear message instead of a transport error.
        valid, err = self.validate_config()
        if not valid:
            return False, err
        import socket as _socket
        hostname = _socket.gethostname().split('.')[0]
        result = self.send(
            'ProxMenux Test Notification',
            'This is a test notification from ProxMenux Monitor.\n'
            'If you received this, your email channel is working correctly.',
            'INFO',
            data={
                'hostname': hostname,
                '_event_type': 'webhook_test',
                '_group': 'other',
                'reason': 'Email notification channel connectivity verified successfully. '
                          'You will receive alerts from ProxMenux Monitor at this address.',
            }
        )
        # FIX: coerce None -> '' so the Tuple[bool, str] contract holds on success.
        return result.get('success', False), result.get('error') or ''
# ─── Channel Factory ─────────────────────────────────────────────
# Registry of supported channel transports.
# Maps a channel type id to its display name, the config keys expected by
# create_channel(), and the implementing class.
CHANNEL_TYPES = {
    'telegram': {
        'name': 'Telegram',
        'config_keys': ['bot_token', 'chat_id', 'topic_id'],
        'class': TelegramChannel,
    },
    'gotify': {
        'name': 'Gotify',
        'config_keys': ['url', 'token'],
        'class': GotifyChannel,
    },
    'discord': {
        'name': 'Discord',
        'config_keys': ['webhook_url'],
        'class': DiscordChannel,
    },
    'email': {
        'name': 'Email (SMTP)',
        'config_keys': ['host', 'port', 'username', 'password', 'tls_mode',
                        'from_address', 'to_addresses', 'subject_prefix'],
        'class': EmailChannel,
    },
}
def create_channel(channel_type: str, config: Dict[str, str]) -> Optional[NotificationChannel]:
    """Create a channel instance from type name and config dict.

    Args:
        channel_type: One of 'telegram', 'gotify', 'discord', or 'email'
            (FIX: 'email' was supported by the implementation but missing
            from the documented types).
        config: Dict with channel-specific keys (see CHANNEL_TYPES)

    Returns:
        Channel instance, or None for an unknown type or construction failure.
    """
    try:
        if channel_type == 'telegram':
            return TelegramChannel(
                bot_token=config.get('bot_token', ''),
                chat_id=config.get('chat_id', ''),
                topic_id=config.get('topic_id', '')
            )
        elif channel_type == 'gotify':
            return GotifyChannel(
                server_url=config.get('url', ''),
                app_token=config.get('token', '')
            )
        elif channel_type == 'discord':
            return DiscordChannel(
                webhook_url=config.get('webhook_url', '')
            )
        elif channel_type == 'email':
            return EmailChannel(config)
    except Exception as e:
        print(f"[NotificationChannels] Failed to create {channel_type}: {e}")
    # Unknown channel type or constructor raised: explicit None.
    return None
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,268 @@
#!/usr/bin/env python3
"""
ProxMenux - HTML Description Templates for OCI Containers
==========================================================
Generates beautiful HTML descriptions for the Proxmox Notes panel.
Can be used from both Python (oci_manager.py) and bash scripts.
Usage from bash:
python3 description_templates.py --app-id "secure-gateway" --hostname "my-gateway"
Usage from Python:
from description_templates import generate_description
html = generate_description(app_def, container_def, hostname)
"""
import sys
import json
import argparse
import urllib.parse
from pathlib import Path
from typing import Dict, Optional
# Default paths
# Catalog of application definitions, shipped alongside this script.
CATALOG_PATH = Path(__file__).parent / "catalog.json"
def get_shield_icon_svg(color: str = "#0EA5E9") -> str:
    """Return SVG markup for a shield-with-checkmark icon.

    Args:
        color: Stroke color applied to the icon (CSS hex string).
    """
    markup = f"""<svg xmlns='http://www.w3.org/2000/svg' width='48' height='48' viewBox='0 0 24 24' fill='none' stroke='{color}' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z'/><path d='M9 12l2 2 4-4'/></svg>"""
    return markup
def get_default_icon_svg(color: str = "#0EA5E9") -> str:
    """Return the inline SVG markup for the generic container (cube) icon.

    Args:
        color: Stroke color applied to the icon (CSS color string).

    Returns:
        A complete ``<svg>`` element as a single-line string.
    """
    pieces = [
        "<svg xmlns='http://www.w3.org/2000/svg' width='48' height='48' ",
        "viewBox='0 0 24 24' fill='none' stroke='",
        color,
        "' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'>",
        "<path d='M21 16V8a2 2 0 0 0-1-1.73l-7-4a2 2 0 0 0-2 0l-7 4A2 2 0 0 0 3 8v8a2 2 0 0 0 1 1.73l7 4a2 2 0 0 0 2 0l7-4A2 2 0 0 0 21 16z'/>",
        "<polyline points='3.27 6.96 12 12.01 20.73 6.96'/>",
        "<line x1='12' y1='22.08' x2='12' y2='12'/></svg>",
    ]
    return "".join(pieces)
# Pre-defined icon types: maps a catalog "icon_type" value to the factory
# that renders its SVG. Unknown types fall back to "default" at lookup time.
ICON_TYPES = {
    "shield": get_shield_icon_svg,
    "container": get_default_icon_svg,
    "default": get_default_icon_svg,
}
def generate_description(
    app_def: Dict,
    container_def: Optional[Dict] = None,
    hostname: str = "",
    extra_info: str = ""
) -> str:
    """
    Generate HTML description for the Proxmox Notes panel.

    Args:
        app_def: Application definition from the catalog; recognized keys are
            name, subtitle, color, icon_type, documentation_url, code_url and
            installer_url — each falls back to a ProxMenux default.
        container_def: Container definition (optional). NOTE(review): not
            referenced in this body; kept for caller compatibility — confirm.
        hostname: Container hostname shown in the footer when non-empty.
        extra_info: Additional footer info to display (e.g., disk info).

    Returns:
        HTML string for the description.
    """
    # Extract app info (every field has a sensible default)
    app_name = app_def.get("name", "ProxMenux App")
    app_subtitle = app_def.get("subtitle", "")
    app_color = app_def.get("color", "#0EA5E9")
    app_icon_type = app_def.get("icon_type", "default")
    doc_url = app_def.get("documentation_url", "https://macrimi.github.io/ProxMenux/")
    code_url = app_def.get("code_url", "https://github.com/MacRimi/ProxMenux")
    installer_url = app_def.get("installer_url", "")
    kofi_url = "https://ko-fi.com/macrimi"

    # Get the icon SVG and embed it as a URL-encoded data URI (no external fetch)
    icon_func = ICON_TYPES.get(app_icon_type, ICON_TYPES["default"])
    icon_svg = icon_func(app_color)
    icon_data = "data:image/svg+xml," + urllib.parse.quote(icon_svg)

    # Build badge buttons (shields.io images wrapped in links)
    badges = []
    badges.append(f"<a href='{doc_url}' target='_blank'><img src='https://img.shields.io/badge/📚_Docs-blue' alt='Docs'></a>")
    badges.append(f"<a href='{code_url}' target='_blank'><img src='https://img.shields.io/badge/💻_Code-green' alt='Code'></a>")
    # Installer badge only when the catalog provides an installer URL
    if installer_url:
        badges.append(f"<a href='{installer_url}' target='_blank'><img src='https://img.shields.io/badge/📦_Installer-orange' alt='Installer'></a>")
    badges.append(f"<a href='{kofi_url}' target='_blank'><img src='https://img.shields.io/badge/☕_Ko--fi-red' alt='Ko-fi'></a>")
    badges_html = "\n".join(badges)

    # Build footer info (hostname and/or extra info, joined with <br>)
    footer_parts = []
    if hostname:
        footer_parts.append(f"Hostname: {hostname}")
    if extra_info:
        footer_parts.append(extra_info)
    footer_html = "<br>".join(footer_parts) if footer_parts else ""

    # Build the complete HTML: logo header, icon card, badges, optional footer
    html = f"""<div align='center'>
<table style='width: 100%; border-collapse: collapse;'>
<tr>
<td style='width: 100px; vertical-align: middle;'>
<img src="/images/design-mode/logo_desc.png" alt='ProxMenux Logo' style='height: 100px;'>
</td>
<td style='vertical-align: middle;'>
<h1 style='margin: 0;'>{app_name}</h1>
<p style='margin: 0;'>Created with ProxMenux</p>
</td>
</tr>
</table>
<div style='margin: 15px 0; padding: 10px; background: #2d2d2d; border-radius: 8px; display: inline-block;'>
<table style='border-collapse: collapse;'>
<tr>
<td style='vertical-align: middle; padding-right: 10px;'>
<img src='{icon_data}' alt='Icon' style='height: 48px;'>
</td>
<td style='vertical-align: middle; text-align: left;'>
<span style='font-size: 18px; font-weight: bold; color: {app_color};'>{app_name}</span><br>
<span style='color: #9ca3af;'>{app_subtitle}</span>
</td>
</tr>
</table>
</div>
<p>
{badges_html}
</p>
"""
    if footer_html:
        html += f"""
<p style='color: #6b7280; font-size: 12px;'>
{footer_html}
</p>
"""
    html += "</div>"
    return html
def generate_vm_description(
    vm_name: str,
    vm_version: str = "",
    doc_url: str = "",
    code_url: str = "",
    installer_url: str = "",
    extra_info: str = "",
    icon_url: str = ""
) -> str:
    """
    Generate HTML description for VMs (like ZimaOS).

    Unlike generate_description(), this variant takes explicit values instead
    of a catalog entry, and every badge except Ko-fi is optional.

    Args:
        vm_name: Name of the VM (shown as the page heading).
        vm_version: Version string (rendered under the heading when set).
        doc_url: Documentation URL (badge omitted when empty).
        code_url: Code repository URL (badge omitted when empty).
        installer_url: Installer URL (badge omitted when empty).
        extra_info: Additional info (e.g., disk info), shown as small footer text.
        icon_url: Custom icon URL for the VM. NOTE(review): not referenced in
            this body — confirm whether it should be rendered.

    Returns:
        HTML string for the description.
    """
    # Build badge buttons; the Ko-fi badge is always appended
    badges = []
    if doc_url:
        badges.append(f"<a href='{doc_url}' target='_blank'><img src='https://img.shields.io/badge/📚_Docs-blue' alt='Docs'></a>")
    if code_url:
        badges.append(f"<a href='{code_url}' target='_blank'><img src='https://img.shields.io/badge/💻_Code-green' alt='Code'></a>")
    if installer_url:
        badges.append(f"<a href='{installer_url}' target='_blank'><img src='https://img.shields.io/badge/📦_Installer-orange' alt='Installer'></a>")
    badges.append("<a href='https://ko-fi.com/macrimi' target='_blank'><img src='https://img.shields.io/badge/☕_Ko--fi-red' alt='Ko-fi'></a>")
    badges_html = "\n".join(badges)

    # Version line (empty string collapses the element entirely)
    version_html = f"<p style='margin: 0;'>{vm_version}</p>" if vm_version else ""

    # Extra info footer (small, muted text)
    extra_html = f"<p style='color: #6b7280; font-size: 12px;'>{extra_info}</p>" if extra_info else ""

    html = f"""<div align='center'>
<table style='width: 100%; border-collapse: collapse;'>
<tr>
<td style='width: 100px; vertical-align: middle;'>
<img src="/images/design-mode/logo_desc.png" alt='ProxMenux Logo' style='height: 100px;'>
</td>
<td style='vertical-align: middle;'>
<h1 style='margin: 0;'>{vm_name}</h1>
<p style='margin: 0;'>Created with ProxMenux</p>
{version_html}
</td>
</tr>
</table>
<p>
{badges_html}
</p>
{extra_html}
</div>"""
    return html
def load_catalog() -> Dict:
    """Load the OCI application catalog from disk.

    Returns:
        The parsed catalog dict, or an empty catalog ``{"apps": {}}`` when
        the catalog file does not exist.

    Raises:
        json.JSONDecodeError: If the catalog file exists but is not valid JSON.
    """
    if CATALOG_PATH.exists():
        # Explicit encoding: app names/subtitles may contain non-ASCII text,
        # and the default encoding is locale-dependent on some systems.
        with open(CATALOG_PATH, encoding="utf-8") as f:
            return json.load(f)
    return {"apps": {}}
def main():
    """CLI interface for generating descriptions.

    Two modes: --app-id renders from the catalog; --vm-name renders an ad-hoc
    VM description. Exits with status 1 when the app is not found or when
    neither mode is selected. Output goes to stdout, either as raw HTML or
    URL-encoded (--output encoded) for direct use in qm/pct descriptions.
    """
    parser = argparse.ArgumentParser(description="Generate HTML descriptions for Proxmox")
    parser.add_argument("--app-id", help="Application ID from catalog")
    parser.add_argument("--hostname", default="", help="Container hostname")
    parser.add_argument("--extra-info", default="", help="Additional info to display")
    parser.add_argument("--output", choices=["html", "encoded"], default="html",
                        help="Output format: html or url-encoded")

    # For VM descriptions (not from catalog)
    parser.add_argument("--vm-name", help="VM name (for non-catalog VMs)")
    parser.add_argument("--vm-version", default="", help="VM version")
    parser.add_argument("--doc-url", default="", help="Documentation URL")
    parser.add_argument("--code-url", default="", help="Code repository URL")
    parser.add_argument("--installer-url", default="", help="Installer URL")

    args = parser.parse_args()

    if args.app_id:
        # Generate from catalog; unknown app ids are a hard error
        catalog = load_catalog()
        apps = catalog.get("apps", {})
        if args.app_id not in apps:
            print(f"Error: App '{args.app_id}' not found in catalog", file=sys.stderr)
            sys.exit(1)
        app_def = apps[args.app_id]
        html = generate_description(app_def, hostname=args.hostname, extra_info=args.extra_info)
    elif args.vm_name:
        # Generate for VM (no catalog lookup)
        html = generate_vm_description(
            vm_name=args.vm_name,
            vm_version=args.vm_version,
            doc_url=args.doc_url,
            code_url=args.code_url,
            installer_url=args.installer_url,
            extra_info=args.extra_info
        )
    else:
        # Neither mode selected: show usage and fail
        parser.print_help()
        sys.exit(1)

    if args.output == "encoded":
        print(urllib.parse.quote(html))
    else:
        print(html)


if __name__ == "__main__":
    main()
File diff suppressed because it is too large Load Diff
+348
View File
@@ -0,0 +1,348 @@
#!/usr/bin/env python3
"""
Database of known Proxmox/Linux errors with causes, solutions, and severity levels.
This provides the AI with accurate, pre-verified information about common errors,
reducing hallucinations and ensuring consistent, helpful responses.
Each entry includes:
- pattern: regex pattern to match against error messages/logs
- cause: brief explanation of what causes this error
- cause_detailed: more comprehensive explanation for detailed mode
- severity: info, warning, critical
- solution: brief actionable solution
- solution_detailed: step-by-step solution for detailed mode
- url: optional documentation link
"""
import re
from typing import Optional, Dict, Any, List
# Known error patterns with causes and solutions
PROXMOX_KNOWN_ERRORS: List[Dict[str, Any]] = [
# ==================== SUBSCRIPTION/LICENSE ====================
{
"pattern": r"no valid subscription|subscription.*invalid|not subscribed",
"cause": "Proxmox enterprise repository requires paid subscription",
"cause_detailed": "Proxmox VE uses a subscription model for enterprise features. Without a valid subscription key, access to the enterprise repository is denied. This is normal for home/lab users.",
"severity": "info",
"solution": "Use no-subscription repository or purchase subscription",
"solution_detailed": "For home/lab use: Switch to the no-subscription repository by editing /etc/apt/sources.list.d/pve-enterprise.list. For production: Purchase a subscription at proxmox.com/pricing",
"url": "https://pve.proxmox.com/wiki/Package_Repositories",
"category": "updates"
},
# ==================== CLUSTER/COROSYNC ====================
{
"pattern": r"quorum.*lost|lost.*quorum|not.*quorate",
"cause": "Cluster lost majority of voting nodes",
"cause_detailed": "Corosync cluster requires more than 50% of configured votes to maintain quorum. When quorum is lost, the cluster becomes read-only to prevent split-brain scenarios.",
"severity": "critical",
"solution": "Check network connectivity between nodes; ensure majority of nodes are online",
"solution_detailed": "1. Verify network connectivity: ping all cluster nodes\n2. Check corosync status: systemctl status corosync\n3. View cluster status: pvecm status\n4. If nodes are unreachable, check firewall rules (ports 5405-5412 UDP)\n5. For emergency single-node operation: pvecm expected 1",
"url": "https://pve.proxmox.com/wiki/Cluster_Manager",
"category": "cluster"
},
{
"pattern": r"corosync.*qdevice.*error|qdevice.*connection.*failed|qdevice.*not.*connected",
"cause": "QDevice helper node is unreachable",
"cause_detailed": "The Corosync QDevice provides an additional vote for 2-node clusters. When it cannot connect, the cluster may lose quorum if one node fails.",
"severity": "warning",
"solution": "Check QDevice server connectivity and corosync-qnetd service",
"solution_detailed": "1. Verify QDevice server is running: systemctl status corosync-qnetd (on QDevice host)\n2. Check connectivity: nc -zv <qdevice-ip> 5403\n3. Restart qdevice: systemctl restart corosync-qdevice\n4. Check certificates: corosync-qdevice-net-certutil -s",
"url": "https://pve.proxmox.com/wiki/Cluster_Manager#_corosync_external_vote_support",
"category": "cluster"
},
{
"pattern": r"corosync.*retransmit|corosync.*token.*timeout|ring.*mark.*faulty",
"cause": "Network latency or packet loss between cluster nodes",
"cause_detailed": "Corosync uses multicast/unicast for cluster communication. High latency, packet loss, or network congestion causes token timeouts and retransmissions, potentially leading to node eviction.",
"severity": "warning",
"solution": "Check network quality between nodes; consider increasing token timeout",
"solution_detailed": "1. Test network latency: ping -c 100 <other-node>\n2. Check for packet loss between nodes\n3. Verify MTU settings match on all interfaces\n4. Increase token timeout in /etc/pve/corosync.conf if needed (default 1000ms)\n5. Check switch/router for congestion",
"category": "cluster"
},
# ==================== DISK/STORAGE ====================
{
"pattern": r"SMART.*FAILED|smart.*failed.*health|Pre-fail|Old_age.*FAILING",
"cause": "Disk SMART health check failed - disk is failing",
"cause_detailed": "SMART (Self-Monitoring, Analysis and Reporting Technology) detected critical disk health issues. The disk is likely failing and data loss is imminent.",
"severity": "critical",
"solution": "IMMEDIATELY backup data and replace disk",
"solution_detailed": "1. URGENT: Backup all data from this disk immediately\n2. Check SMART details: smartctl -a /dev/sdX\n3. Note the failing attributes (Reallocated_Sector_Ct, Current_Pending_Sector, etc.)\n4. Plan disk replacement\n5. If in RAID/ZFS: initiate disk replacement procedure",
"category": "disks"
},
{
"pattern": r"Reallocated_Sector_Ct.*threshold|reallocated.*sectors?.*exceeded",
"cause": "Disk has excessive bad sectors being remapped",
"cause_detailed": "The disk firmware has remapped multiple bad sectors to spare areas. While the disk is still functioning, this indicates physical degradation and eventual failure.",
"severity": "warning",
"solution": "Monitor closely and plan disk replacement",
"solution_detailed": "1. Check current value: smartctl -A /dev/sdX | grep Reallocated\n2. If value is increasing, plan immediate replacement\n3. Backup important data\n4. Run extended SMART test: smartctl -t long /dev/sdX",
"category": "disks"
},
{
"pattern": r"ata.*error|ATA.*bus.*error|Emask.*0x|DRDY.*ERR|UNC.*error",
"cause": "ATA communication error with disk",
"cause_detailed": "The SATA/ATA controller encountered communication errors with the disk. This can indicate cable issues, controller problems, or disk failure.",
"severity": "warning",
"solution": "Check SATA cables and connections; verify disk health with smartctl",
"solution_detailed": "1. Check SMART health: smartctl -H /dev/sdX\n2. Inspect and reseat SATA cables\n3. Try different SATA port\n4. Check dmesg for pattern of errors\n5. If errors persist, disk may be failing",
"category": "disks"
},
{
"pattern": r"I/O.*error|blk_update_request.*error|Buffer I/O error",
"cause": "Disk I/O operation failed",
"cause_detailed": "The kernel failed to read or write data to the disk. This can be caused by disk failure, cable issues, or filesystem corruption.",
"severity": "critical",
"solution": "Check disk health and connections immediately",
"solution_detailed": "1. Check SMART status: smartctl -H /dev/sdX\n2. Check dmesg for related errors: dmesg | grep -i error\n3. Verify disk is still accessible: lsblk\n4. If ZFS: check pool status with zpool status\n5. Consider filesystem check if safe to unmount",
"category": "disks"
},
{
"pattern": r"zfs.*pool.*DEGRADED|pool.*is.*degraded",
"cause": "ZFS pool has reduced redundancy",
"cause_detailed": "One or more devices in the ZFS pool are unavailable or experiencing errors. The pool is still functional but without full redundancy.",
"severity": "warning",
"solution": "Identify failed device with 'zpool status' and replace",
"solution_detailed": "1. Check pool status: zpool status <pool>\n2. Identify the DEGRADED or UNAVAIL device\n3. If device is present but erroring: zpool scrub <pool>\n4. To replace: zpool replace <pool> <old-device> <new-device>\n5. Monitor resilver progress: zpool status",
"category": "storage"
},
{
"pattern": r"zfs.*pool.*FAULTED|pool.*is.*faulted",
"cause": "ZFS pool is inaccessible",
"cause_detailed": "The ZFS pool has lost too many devices and cannot maintain data integrity. Data may be inaccessible.",
"severity": "critical",
"solution": "Check failed devices; may need data recovery",
"solution_detailed": "1. Check status: zpool status <pool>\n2. Identify all failed devices\n3. Attempt to online devices: zpool online <pool> <device>\n4. If drives are physically present, try zpool clear <pool>\n5. May require data recovery if multiple drives failed",
"category": "storage"
},
# ==================== CEPH ====================
{
"pattern": r"ceph.*OSD.*down|osd\.\d+.*down|ceph.*osd.*failed",
"cause": "Ceph OSD daemon is not running",
"cause_detailed": "A Ceph Object Storage Daemon (OSD) has stopped or crashed. This reduces storage redundancy and may trigger data rebalancing.",
"severity": "warning",
"solution": "Check disk health and restart OSD service",
"solution_detailed": "1. Check OSD status: ceph osd tree\n2. View OSD logs: journalctl -u ceph-osd@<id>\n3. Check underlying disk: smartctl -H /dev/sdX\n4. Restart OSD: systemctl start ceph-osd@<id>\n5. If OSD keeps crashing, check for disk failure",
"category": "storage"
},
{
"pattern": r"ceph.*health.*WARN|HEALTH_WARN",
"cause": "Ceph cluster has warnings",
"cause_detailed": "Ceph detected issues that don't prevent operation but should be addressed. Common causes: degraded PGs, clock skew, full OSDs.",
"severity": "warning",
"solution": "Run 'ceph health detail' for specific issues",
"solution_detailed": "1. Get details: ceph health detail\n2. Common fixes:\n - Degraded PGs: wait for recovery or add capacity\n - Clock skew: sync NTP on all nodes\n - Full OSDs: add storage or delete data\n3. Check: ceph status",
"category": "storage"
},
{
"pattern": r"ceph.*health.*ERR|HEALTH_ERR",
"cause": "Ceph cluster has critical errors",
"cause_detailed": "Ceph has detected critical issues that may affect data availability or integrity. Immediate attention required.",
"severity": "critical",
"solution": "Run 'ceph health detail' and address errors immediately",
"solution_detailed": "1. Get details: ceph health detail\n2. Check OSD status: ceph osd tree\n3. Check MON status: ceph mon stat\n4. View PG status: ceph pg stat\n5. Address each error shown in health detail",
"category": "storage"
},
# ==================== VM/CT ERRORS ====================
{
"pattern": r"TASK ERROR.*failed to get exclusive lock|lock.*timeout|couldn't acquire lock",
"cause": "Resource is locked by another operation",
"cause_detailed": "Another task is currently holding a lock on this VM/CT. This prevents concurrent modifications that could cause corruption.",
"severity": "info",
"solution": "Wait for other task to complete or check for stuck tasks",
"solution_detailed": "1. Check running tasks: cat /var/log/pve/tasks/active\n2. Wait for task completion\n3. If task is stuck (>1h), check process: ps aux | grep <vmid>\n4. As last resort, remove lock file: rm /var/lock/qemu-server/lock-<vmid>.conf",
"category": "vms"
},
{
"pattern": r"kvm.*not.*available|kvm.*disabled|hardware.*virtualization.*disabled",
"cause": "KVM/hardware virtualization not available",
"cause_detailed": "The CPU's hardware virtualization extensions (Intel VT-x or AMD-V) are either not supported, not enabled in BIOS, or blocked by another hypervisor.",
"severity": "warning",
"solution": "Enable VT-x/AMD-V in BIOS settings",
"solution_detailed": "1. Reboot into BIOS/UEFI\n2. Find Virtualization settings (often in CPU or Advanced section)\n3. Enable Intel VT-x or AMD-V/SVM\n4. Save and reboot\n5. Verify: grep -E 'vmx|svm' /proc/cpuinfo",
"category": "vms"
},
{
"pattern": r"out of memory|OOM.*kill|cannot allocate memory|memory.*exhausted",
"cause": "System or VM ran out of memory",
"cause_detailed": "The Linux OOM (Out Of Memory) killer terminated a process to free memory. This indicates memory pressure from overcommitment or memory leaks.",
"severity": "critical",
"solution": "Increase memory allocation or reduce VM memory usage",
"solution_detailed": "1. Check what was killed: dmesg | grep -i oom\n2. Review memory usage: free -h\n3. Check balloon driver status for VMs\n4. Consider adding swap or RAM\n5. Review VM memory allocations for overcommitment",
"category": "memory"
},
# ==================== NETWORK ====================
{
"pattern": r"bond.*slave.*link.*down|bond.*no.*active.*slave",
"cause": "Network bond lost a slave interface",
"cause_detailed": "One or more physical interfaces in a network bond have lost link. Depending on bond mode, this may reduce bandwidth or affect failover.",
"severity": "warning",
"solution": "Check physical cable connections and switch ports",
"solution_detailed": "1. Check bond status: cat /proc/net/bonding/bond0\n2. Identify down slave interface\n3. Check physical cable connection\n4. Check switch port status and errors\n5. Verify interface: ethtool <slave-iface>",
"category": "network"
},
{
"pattern": r"link.*not.*ready|carrier.*lost|link.*down|NIC.*Link.*Down",
"cause": "Network interface lost link",
"cause_detailed": "The physical or virtual network interface has lost its connection. This could be a cable issue, switch problem, or driver issue.",
"severity": "warning",
"solution": "Check cable, switch port, and interface status",
"solution_detailed": "1. Check interface: ip link show <iface>\n2. Check cable connection\n3. Check switch port LEDs\n4. Try: ip link set <iface> down && ip link set <iface> up\n5. Check driver: ethtool -i <iface>",
"category": "network"
},
{
"pattern": r"bridge.*STP.*blocked|spanning.*tree.*blocked",
"cause": "Spanning Tree Protocol blocked a port",
"cause_detailed": "STP detected a potential network loop and blocked a bridge port to prevent broadcast storms. This is normal behavior but may indicate network topology issues.",
"severity": "info",
"solution": "Review network topology; this may be expected behavior",
"solution_detailed": "1. Check bridge status: brctl show\n2. View STP state: brctl showstp <bridge>\n3. If unexpected, review network topology for loops\n4. Consider disabling STP if network is simple: brctl stp <bridge> off",
"category": "network"
},
# ==================== SERVICES ====================
{
"pattern": r"pvedaemon.*failed|pveproxy.*failed|pvestatd.*failed",
"cause": "Critical Proxmox service failed",
"cause_detailed": "One of the core Proxmox daemons has crashed or failed to start. This may affect web GUI access or API functionality.",
"severity": "critical",
"solution": "Restart the failed service; check logs for cause",
"solution_detailed": "1. Check status: systemctl status <service>\n2. View logs: journalctl -u <service> -n 50\n3. Restart: systemctl restart <service>\n4. If persistent, check: /var/log/pveproxy/access.log",
"category": "pve_services"
},
{
"pattern": r"failed to start.*service|service.*start.*failed|service.*activation.*failed",
"cause": "System service failed to start",
"cause_detailed": "A systemd service unit failed during startup. This could be due to configuration errors, missing dependencies, or resource issues.",
"severity": "warning",
"solution": "Check service logs with journalctl -u <service>",
"solution_detailed": "1. Check status: systemctl status <service>\n2. View logs: journalctl -xeu <service>\n3. Check config: systemctl cat <service>\n4. Verify dependencies: systemctl list-dependencies <service>\n5. Try restart: systemctl restart <service>",
"category": "services"
},
# ==================== BACKUP ====================
{
"pattern": r"backup.*failed|vzdump.*error|backup.*job.*failed",
"cause": "Backup job failed",
"cause_detailed": "A scheduled or manual backup operation failed. Common causes: storage full, VM locked, network issues for remote storage.",
"severity": "warning",
"solution": "Check backup storage space and VM status",
"solution_detailed": "1. Check backup log in Datacenter > Backup\n2. Verify storage space: df -h\n3. Check if VM is locked: qm list or pct list\n4. Verify backup storage is accessible\n5. Try manual backup to identify specific error",
"category": "backups"
},
# ==================== CERTIFICATES ====================
{
"pattern": r"certificate.*expired|SSL.*certificate.*expired|cert.*expir",
"cause": "SSL/TLS certificate has expired",
"cause_detailed": "An SSL certificate used for secure communication has passed its expiration date. This may cause connection failures or security warnings.",
"severity": "warning",
"solution": "Renew the certificate using pvenode cert set or Let's Encrypt",
"solution_detailed": "1. Check certificate: pvenode cert info\n2. For self-signed renewal: pvecm updatecerts\n3. For Let's Encrypt: pvenode acme cert order\n4. Restart pveproxy after renewal: systemctl restart pveproxy",
"url": "https://pve.proxmox.com/wiki/Certificate_Management",
"category": "security"
},
# ==================== HARDWARE/TEMPERATURE ====================
{
"pattern": r"temperature.*critical|thermal.*critical|CPU.*overheating|temp.*above.*threshold",
"cause": "Component temperature critical",
"cause_detailed": "A hardware component (CPU, disk, etc.) has reached a dangerous temperature. Sustained high temperatures can cause hardware damage or system shutdowns.",
"severity": "critical",
"solution": "Check cooling system immediately; clean dust, verify fans",
"solution_detailed": "1. Check current temps: sensors\n2. Verify all fans are running\n3. Clean dust from heatsinks and filters\n4. Ensure adequate airflow\n5. Consider reapplying thermal paste if CPU\n6. Check ambient room temperature",
"category": "temperature"
},
# ==================== AUTHENTICATION ====================
{
"pattern": r"authentication.*failed|login.*failed|invalid.*credentials|access.*denied",
"cause": "Authentication failure",
"cause_detailed": "A login attempt failed due to invalid credentials or permissions. Multiple failures may indicate a brute-force attack.",
"severity": "info",
"solution": "Verify credentials; check for unauthorized access attempts",
"solution_detailed": "1. Review auth logs: journalctl -u pvedaemon | grep auth\n2. Check for multiple failures from same IP\n3. Verify user exists: pveum user list\n4. If attack suspected, consider fail2ban\n5. Reset password if needed: pveum passwd <user>",
"category": "security"
},
]
def find_matching_error(text: str, category: Optional[str] = None) -> Optional[Dict[str, Any]]:
    """Find the first known error whose pattern matches the given text.

    Args:
        text: Error message or log content to match against.
        category: Optional category name; when set, only entries with that
            category are considered.

    Returns:
        The matching entry from PROXMOX_KNOWN_ERRORS, or None.
    """
    if not text:
        return None
    for error in PROXMOX_KNOWN_ERRORS:
        # Filter by category if specified
        if category and error.get("category") != category:
            continue
        try:
            # re.IGNORECASE already makes matching case-insensitive, so the
            # original text is searched directly. (The previous .lower() was
            # redundant and would break any future case-sensitive pattern.)
            if re.search(error["pattern"], text, re.IGNORECASE):
                return error
        except re.error:
            # Defensive: skip a malformed pattern rather than abort the scan.
            continue
    return None
def get_error_context(text: str, category: Optional[str] = None, detail_level: str = "standard") -> Optional[str]:
    """Get formatted context for a known error.

    Args:
        text: Error message to match.
        category: Optional category filter.
        detail_level: "minimal", "standard", or "detailed". Anything other
            than "minimal"/"standard" is treated as detailed (original
            behavior).

    Returns:
        Formatted context string, or None when no known error matches.
    """
    error = find_matching_error(text, category)
    if not error:
        return None

    if detail_level == "minimal":
        return f"Known issue: {error['cause']}"

    # "standard" and "detailed" share the same layout; they differ only in
    # which cause/solution text is shown and the label on the docs link.
    detailed = detail_level != "standard"
    cause = error.get("cause_detailed", error["cause"]) if detailed else error["cause"]
    solution = error.get("solution_detailed", error["solution"]) if detailed else error["solution"]

    lines = [
        "KNOWN PROXMOX ERROR DETECTED:",
        f"  Cause: {cause}",
        f"  Severity: {error['severity'].upper()}",
        f"  Solution: {solution}",
    ]
    if error.get("url"):
        label = "Documentation" if detailed else "Docs"
        lines.append(f"  {label}: {error['url']}")
    return "\n".join(lines)
def get_all_patterns() -> List[str]:
    """Return the regex pattern string of every known error entry."""
    patterns: List[str] = []
    for entry in PROXMOX_KNOWN_ERRORS:
        patterns.append(entry["pattern"])
    return patterns
+49 -6
View File
@@ -8,18 +8,32 @@ Monitors configured Proxmox storages and tracks unavailable storages
import json
import subprocess
import socket
import time
from typing import Dict, List, Any, Optional
class ProxmoxStorageMonitor:
"""Monitor Proxmox storage configuration and status"""
# Cache TTL: 177 seconds (~3 min) - offset to avoid sync with other processes
_CACHE_TTL = 177
def __init__(self):
self.configured_storages: Dict[str, Dict[str, Any]] = {}
self._node_name_cache = {'name': None, 'time': 0}
self._storage_status_cache = {'data': None, 'time': 0}
self._config_cache_time = 0 # Track when config was last loaded
self._load_configured_storages()
def _get_node_name(self) -> str:
"""Get current Proxmox node name"""
"""Get current Proxmox node name (cached)"""
current_time = time.time()
cache = self._node_name_cache
# Return cached result if fresh
if cache['name'] and (current_time - cache['time']) < self._CACHE_TTL:
return cache['name']
try:
result = subprocess.run(
['pvesh', 'get', '/nodes', '--output-format', 'json'],
@@ -32,9 +46,14 @@ class ProxmoxStorageMonitor:
hostname = socket.gethostname()
for node in nodes:
if node.get('node') == hostname:
cache['name'] = hostname
cache['time'] = current_time
return hostname
if nodes:
return nodes[0].get('node', hostname)
name = nodes[0].get('node', hostname)
cache['name'] = name
cache['time'] = current_time
return name
return socket.gethostname()
except Exception:
return socket.gethostname()
@@ -84,7 +103,7 @@ class ProxmoxStorageMonitor:
def get_storage_status(self) -> Dict[str, List[Dict[str, Any]]]:
"""
Get storage status, including unavailable storages
Get storage status, including unavailable storages (cached)
Returns:
{
@@ -92,6 +111,13 @@ class ProxmoxStorageMonitor:
'unavailable': [...]
}
"""
current_time = time.time()
cache = self._storage_status_cache
# Return cached result if fresh
if cache['data'] and (current_time - cache['time']) < self._CACHE_TTL:
return cache['data']
try:
local_node = self._get_node_name()
@@ -176,10 +202,16 @@ class ProxmoxStorageMonitor:
'node': local_node
})
return {
result_data = {
'available': available_storages,
'unavailable': unavailable_storages
}
# Cache the result
cache['data'] = result_data
cache['time'] = current_time
return result_data
except Exception:
return {
@@ -192,10 +224,21 @@ class ProxmoxStorageMonitor:
status = self.get_storage_status()
return len(status['unavailable'])
def reload_configuration(self) -> None:
"""Reload storage configuration from Proxmox"""
def reload_configuration(self, force: bool = False) -> None:
"""Reload storage configuration from Proxmox (cached)
Args:
force: If True, bypass cache and force reload
"""
current_time = time.time()
# Skip reload if cache is still fresh (unless forced)
if not force and (current_time - self._config_cache_time) < self._CACHE_TTL:
return
self.configured_storages.clear()
self._load_configured_storages()
self._config_cache_time = current_time
# Global instance
File diff suppressed because it is too large Load Diff
+510
View File
@@ -0,0 +1,510 @@
"""
Centralized Startup Grace Period Management
This module provides a single source of truth for startup grace period logic.
During system boot, various transient issues occur (high latency, storage not ready,
QMP timeouts, etc.) that shouldn't trigger notifications or critical alerts.
Grace Periods:
- VM/CT aggregation: 3 minutes - Aggregate multiple VM/CT starts into one notification
- Health suppression: 5 minutes - Suppress transient health warnings/errors
- Shutdown suppression: 2 minutes - Suppress VM/CT stops during system shutdown
Categories suppressed during startup:
- storage: NFS/CIFS mounts may take time to become available
- vms: VMs may have QMP timeouts or startup delays
- network: Latency spikes during boot are normal
- services: PVE services may take time to fully initialize
"""
import time
import threading
from typing import Set, List, Tuple, Optional
# ─── Configuration ───────────────────────────────────────────────────────────
# Grace period durations (seconds)
STARTUP_VM_GRACE_SECONDS = 180 # 3 minutes for VM/CT start aggregation
STARTUP_HEALTH_GRACE_SECONDS = 300 # 5 minutes for health warning suppression
SHUTDOWN_GRACE_SECONDS = 120 # 2 minutes for VM/CT stop suppression
# Maximum system uptime to consider this a real server boot (not just service restart)
# If system uptime > this value when service starts, skip startup notification
MAX_BOOT_UPTIME_SECONDS = 600 # 10 minutes - if system was up longer, it's a service restart
def _get_system_uptime() -> float:
"""
Get actual system uptime in seconds from /proc/uptime.
Returns 0 if unable to read (will default to treating as new boot).
"""
try:
with open('/proc/uptime', 'r') as f:
return float(f.readline().split()[0])
except Exception:
return 0
# Categories to suppress during startup grace period.
# These categories typically produce transient, self-resolving issues while
# the host is still booting, so alerts for them are held back.
STARTUP_GRACE_CATEGORIES: Set[str] = {
    'storage',   # NFS/CIFS mounts may take time
    'vms',       # VMs may have QMP timeouts
    'network',   # Latency spikes during boot
    'services',  # PVE services initialization
}
# ─── Singleton State ─────────────────────────────────────────────────────────
class _StartupGraceState:
    """
    Thread-safe singleton managing all startup/shutdown grace period state.

    Initialized when the module loads (service start), which serves as the
    reference point for determining if we're still in the startup period.
    """

    # Class-level singleton slot plus the lock guarding its creation.
    _instance: Optional['_StartupGraceState'] = None
    _init_lock = threading.Lock()

    def __new__(cls) -> '_StartupGraceState':
        # Double-checked locking: cheap unlocked test first, then re-check
        # under the lock so concurrent first calls build only one instance.
        if cls._instance is None:
            with cls._init_lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        # __init__ runs on every _StartupGraceState() call; the flag set in
        # __new__ ensures the real initialization below happens only once.
        if self._initialized:
            return
        self._lock = threading.Lock()
        # Startup time = when service started (module load time)
        self._startup_time: float = time.time()
        # Check if this is a REAL system boot or just a service restart
        # by comparing system uptime to our threshold
        system_uptime = _get_system_uptime()
        self._is_real_boot: bool = system_uptime < MAX_BOOT_UPTIME_SECONDS
        # Shutdown tracking (0 means "no shutdown detected yet")
        self._shutdown_time: float = 0
        # VM/CT aggregation during startup
        self._startup_vms: List[Tuple[str, str, str]] = []  # [(vmid, vmname, 'vm'|'ct'), ...]
        self._startup_aggregated: bool = False
        self._initialized = True

    # ─── Startup Period Checks ───────────────────────────────────────────────

    def is_startup_vm_period(self) -> bool:
        """
        Check if we're within the VM/CT start aggregation period (3 min).

        During this period, individual VM/CT start notifications are collected
        and later sent as a single aggregated notification.
        """
        with self._lock:
            return (time.time() - self._startup_time) < STARTUP_VM_GRACE_SECONDS

    def is_startup_health_grace(self) -> bool:
        """
        Check if we're within the health suppression period (5 min).

        During this period:
        - Transient health warnings (latency, storage, etc.) are suppressed
        - CRITICAL/WARNING may be downgraded to INFO for certain categories
        - Health degradation notifications are skipped for grace categories
        """
        with self._lock:
            return (time.time() - self._startup_time) < STARTUP_HEALTH_GRACE_SECONDS

    def should_suppress_category(self, category: str) -> bool:
        """
        Check if notifications for a category should be suppressed.

        Args:
            category: Health category name (e.g., 'network', 'storage', 'vms')

        Returns:
            True if we're in grace period AND category is in STARTUP_GRACE_CATEGORIES
        """
        # Membership test needs no lock; only the time comparison inside
        # is_startup_health_grace() is synchronized.
        if category.lower() in STARTUP_GRACE_CATEGORIES:
            return self.is_startup_health_grace()
        return False

    def is_real_system_boot(self) -> bool:
        """
        Check if the service started during a real system boot.

        Returns False if the system was already running for more than 10 minutes
        when the service started (indicates a service restart, not a system boot).

        This prevents sending "System startup completed" notifications when
        just restarting the ProxMenux Monitor service.
        """
        with self._lock:
            return self._is_real_boot

    def get_startup_elapsed(self) -> float:
        """Get seconds elapsed since service startup."""
        with self._lock:
            return time.time() - self._startup_time

    # ─── Shutdown Tracking ───────────────────────────────────────────────────

    def mark_shutdown(self):
        """
        Called when system_shutdown or system_reboot is detected.

        After this, VM/CT stop notifications will be suppressed for the
        shutdown grace period (expected stops during system shutdown).
        """
        with self._lock:
            self._shutdown_time = time.time()

    def is_host_shutting_down(self) -> bool:
        """
        Check if we're within the shutdown grace period.

        During this period, VM/CT stop events are expected and should not
        generate notifications.
        """
        with self._lock:
            if self._shutdown_time == 0:
                return False
            return (time.time() - self._shutdown_time) < SHUTDOWN_GRACE_SECONDS

    # ─── VM/CT Start Aggregation ─────────────────────────────────────────────

    def add_startup_vm(self, vmid: str, vmname: str, vm_type: str):
        """
        Record a VM/CT start during startup period for later aggregation.

        Args:
            vmid: VM/CT ID
            vmname: VM/CT name
            vm_type: 'vm' or 'ct'
        """
        with self._lock:
            self._startup_vms.append((vmid, vmname, vm_type))

    def get_and_clear_startup_vms(self) -> List[Tuple[str, str, str]]:
        """
        Get all recorded startup VMs and clear the list.

        Should be called once after the VM aggregation grace period ends
        to get all VMs that started during boot for a single notification.

        Returns:
            List of (vmid, vmname, vm_type) tuples
        """
        with self._lock:
            vms = self._startup_vms.copy()
            self._startup_vms = []
            self._startup_aggregated = True
            return vms

    def has_startup_vms(self) -> bool:
        """Check if there are any startup VMs recorded."""
        with self._lock:
            return len(self._startup_vms) > 0

    def was_startup_aggregated(self) -> bool:
        """Check if startup aggregation has already been processed."""
        with self._lock:
            return self._startup_aggregated

    def mark_startup_aggregated(self) -> None:
        """Mark startup aggregation as completed without returning VMs."""
        with self._lock:
            self._startup_aggregated = True
# ─── Module-level convenience functions ──────────────────────────────────────
# Thin wrappers around the process-wide singleton so callers use plain
# functions instead of touching _StartupGraceState directly.

# Global singleton instance (created at import time, i.e. at service start)
_state = _StartupGraceState()


def is_startup_vm_period() -> bool:
    """Check if we're within the VM/CT start aggregation period (3 min)."""
    return _state.is_startup_vm_period()


def is_startup_health_grace() -> bool:
    """Check if we're within the health suppression period (5 min)."""
    return _state.is_startup_health_grace()


def should_suppress_category(category: str) -> bool:
    """Check if notifications for a category should be suppressed during startup."""
    return _state.should_suppress_category(category)


def get_startup_elapsed() -> float:
    """Get seconds elapsed since service startup."""
    return _state.get_startup_elapsed()


def mark_shutdown() -> None:
    """Mark that system shutdown/reboot has been detected."""
    _state.mark_shutdown()


def is_host_shutting_down() -> bool:
    """Check if we're within the shutdown grace period."""
    return _state.is_host_shutting_down()


def add_startup_vm(vmid: str, vmname: str, vm_type: str) -> None:
    """Record a VM/CT start during startup period for aggregation."""
    _state.add_startup_vm(vmid, vmname, vm_type)


def get_and_clear_startup_vms() -> List[Tuple[str, str, str]]:
    """Get all recorded startup VMs and clear the list."""
    return _state.get_and_clear_startup_vms()


def has_startup_vms() -> bool:
    """Check if there are any startup VMs recorded."""
    return _state.has_startup_vms()


def was_startup_aggregated() -> bool:
    """Check if startup aggregation has already been processed."""
    return _state.was_startup_aggregated()


def mark_startup_aggregated() -> None:
    """Mark startup aggregation as completed without processing VMs.

    Use this when skipping startup notification (e.g., service restart
    instead of real system boot) to prevent future checks.
    """
    _state.mark_startup_aggregated()


def is_real_system_boot() -> bool:
    """
    Check if this is a real system boot (not just a service restart).

    Returns True if the system uptime was less than 10 minutes when the
    service started. Returns False if the system was already running
    longer (indicates the service was restarted, not the whole system).

    Use this to prevent sending "System startup completed" notifications
    when just restarting the ProxMenux Monitor service.
    """
    return _state.is_real_system_boot()
# ─── Startup Report Collection ───────────────────────────────────────────────
def collect_startup_report() -> dict:
    """
    Collect comprehensive startup report data.

    Called at the end of the grace period to generate a complete
    startup report including:
    - VMs/CTs that started successfully
    - VMs/CTs that failed to start
    - Service status
    - Storage status
    - Journal errors during boot (for AI enrichment)

    Returns:
        Dictionary with startup report data
    """
    import subprocess

    # Baseline report with optimistic defaults; refined below from health data.
    report = {
        # VMs/CTs
        'vms_started': [],
        'cts_started': [],
        'vms_failed': [],
        'cts_failed': [],
        # System status
        'services_ok': True,
        'services_failed': [],
        'storage_ok': True,
        'storage_unavailable': [],
        # Health summary
        'health_status': 'OK',
        'health_issues': [],
        # For AI enrichment
        '_journal_context': '',
        '_startup_errors': [],
        # Metadata
        'startup_duration_seconds': get_startup_elapsed(),
        'timestamp': int(time.time()),
    }

    # Get VMs/CTs that started during boot.
    # NOTE: this drains the aggregation list and marks aggregation done,
    # so a second call to collect_startup_report() sees empty start lists.
    startup_vms = get_and_clear_startup_vms()
    for vmid, vmname, vm_type in startup_vms:
        if vm_type == 'vm':
            report['vms_started'].append({'vmid': vmid, 'name': vmname})
        else:
            report['cts_started'].append({'vmid': vmid, 'name': vmname})

    # Try to get health status from health_monitor (imported lazily; any
    # failure is recorded in _startup_errors instead of raising).
    try:
        import health_monitor
        health_data = health_monitor.get_detailed_status()
        if health_data:
            report['health_status'] = health_data.get('overall_status', 'UNKNOWN')
            # Check storage
            storage_cat = health_data.get('categories', {}).get('storage', {})
            if storage_cat.get('status') in ['CRITICAL', 'WARNING']:
                report['storage_ok'] = False
                for check in storage_cat.get('checks', []):
                    if check.get('status') in ['CRITICAL', 'WARNING', 'error']:
                        report['storage_unavailable'].append({
                            'name': check.get('name', 'unknown'),
                            'reason': check.get('reason', check.get('message', ''))
                        })
            # Check services
            services_cat = health_data.get('categories', {}).get('services', {})
            if services_cat.get('status') in ['CRITICAL', 'WARNING']:
                report['services_ok'] = False
                for check in services_cat.get('checks', []):
                    if check.get('status') in ['CRITICAL', 'WARNING', 'error']:
                        report['services_failed'].append({
                            'name': check.get('name', 'unknown'),
                            'reason': check.get('reason', check.get('message', ''))
                        })
            # Check VMs category for failed VMs
            vms_cat = health_data.get('categories', {}).get('vms', {})
            for check in vms_cat.get('checks', []):
                if check.get('status') in ['CRITICAL', 'WARNING', 'error']:
                    # Determine if VM or CT based on name/type
                    check_name = check.get('name', '')
                    check_reason = check.get('reason', check.get('message', ''))
                    # Matches both Spanish ('error al iniciar') and English
                    # start-failure phrasings in the health-check reason text.
                    if 'error al iniciar' in check_reason.lower() or 'failed to start' in check_reason.lower():
                        if 'CT' in check_name or 'Container' in check_name:
                            report['cts_failed'].append({
                                'name': check_name,
                                'reason': check_reason
                            })
                        else:
                            report['vms_failed'].append({
                                'name': check_name,
                                'reason': check_reason
                            })
            # Collect all health issues for summary
            for cat_name, cat_data in health_data.get('categories', {}).items():
                if cat_data.get('status') in ['CRITICAL', 'WARNING']:
                    report['health_issues'].append({
                        'category': cat_name,
                        'status': cat_data.get('status'),
                        'reason': cat_data.get('reason', '')
                    })
    except Exception as e:
        report['_startup_errors'].append(f"Error getting health data: {e}")

    # Get journal errors during startup (for AI enrichment)
    try:
        # NOTE(review): reads the singleton's private _startup_time without
        # its lock; the value is write-once at init so this looks safe, but
        # a public accessor would be cleaner — confirm before relying on it.
        boot_time = int(_state._startup_time)
        result = subprocess.run(
            ['journalctl', '-p', 'err', '--since', f'@{boot_time}', '--no-pager', '-n', '50'],
            capture_output=True,
            text=True,
            timeout=10
        )
        if result.returncode == 0 and result.stdout.strip():
            report['_journal_context'] = result.stdout.strip()
    except Exception as e:
        report['_startup_errors'].append(f"Error getting journal: {e}")

    return report
def format_startup_summary(report: dict) -> str:
    """
    Build a human-readable, multi-line startup summary from report data.

    Args:
        report: Dictionary produced by collect_startup_report()

    Returns:
        Newline-joined summary string
    """
    started_vms = report.get('vms_started', [])
    started_cts = report.get('cts_started', [])
    failed_vms = report.get('vms_failed', [])
    failed_cts = report.get('cts_failed', [])
    services_failed = report.get('services_failed', [])
    storage_unavailable = report.get('storage_unavailable', [])

    started_total = len(started_vms) + len(started_cts)
    failed_total = len(failed_vms) + len(failed_cts)

    # Any failed guest, degraded service/storage, or non-OK health counts
    # as an issue for the header line.
    problems_found = (
        failed_total > 0
        or not report.get('services_ok', True)
        or not report.get('storage_ok', True)
        or report.get('health_status') in ['CRITICAL', 'WARNING']
    )

    summary = []

    # Header
    if problems_found:
        issue_total = failed_total + len(services_failed) + len(storage_unavailable)
        summary.append(f"System startup - {issue_total} issue(s) detected")
    else:
        summary.append("System startup completed")
        summary.append("All systems operational.")

    # Guests that came up during boot, with names truncated past 5 entries.
    if started_total > 0:
        group_parts = []
        if started_vms:
            group_parts.append(f"{len(started_vms)} VM{'s' if len(started_vms) > 1 else ''}")
        if started_cts:
            group_parts.append(f"{len(started_cts)} CT{'s' if len(started_cts) > 1 else ''}")
        labels = [f"{g['name']} ({g['vmid']})" for g in started_vms + started_cts]
        started_line = f"{' and '.join(group_parts)} started"
        if labels and len(labels) <= 5:
            started_line += f": {', '.join(labels)}"
        elif labels:
            started_line += f": {', '.join(labels[:3])}... (+{len(labels) - 3} more)"
        summary.append(started_line)

    # One line per failed guest.
    if failed_total > 0:
        for entry in failed_vms:
            summary.append(f"VM failed: {entry['name']} - {entry.get('reason', 'unknown error')}")
        for entry in failed_cts:
            summary.append(f"CT failed: {entry['name']} - {entry.get('reason', 'unknown error')}")

    # Storage issues (at most 3 names listed).
    if not report.get('storage_ok', True) and storage_unavailable:
        storage_names = [item['name'] for item in storage_unavailable]
        summary.append(f"Storage: {len(storage_unavailable)} unavailable ({', '.join(storage_names[:3])})")

    # Service issues (at most 3 names listed).
    if not report.get('services_ok', True) and services_failed:
        service_names = [item['name'] for item in services_failed]
        summary.append(f"Services: {len(services_failed)} failed ({', '.join(service_names[:3])})")

    return '\n'.join(summary)
# ─── For backwards compatibility ─────────────────────────────────────────────
# Expose constants for external use; alias kept so existing imports of
# GRACE_CATEGORIES keep working.
GRACE_CATEGORIES = STARTUP_GRACE_CATEGORIES
+481
View File
@@ -0,0 +1,481 @@
#!/bin/bash
# ============================================================================
# ProxMenux Notification System - Complete Test Suite
# ============================================================================
#
# Usage:
# chmod +x test_all_notifications.sh
# ./test_all_notifications.sh # Run ALL tests (with 3s pause between)
# ./test_all_notifications.sh system # Run only System category
# ./test_all_notifications.sh vm_ct # Run only VM/CT category
# ./test_all_notifications.sh backup # Run only Backup category
# ./test_all_notifications.sh resources # Run only Resources category
# ./test_all_notifications.sh storage # Run only Storage category
# ./test_all_notifications.sh network # Run only Network category
# ./test_all_notifications.sh security # Run only Security category
# ./test_all_notifications.sh cluster # Run only Cluster category
# ./test_all_notifications.sh burst # Run only Burst aggregation tests
#
# Each test sends a simulated webhook to the local notification endpoint.
# Check your Telegram/Gotify/Discord/Email for the notifications.
# ============================================================================
# Webhook endpoint of the local ProxMenux notification service
API="http://127.0.0.1:8008/api/notifications/webhook"
PAUSE=3 # seconds between tests

# ANSI color codes used for test output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
BOLD='\033[1m'

# Running counters updated by send_test
test_count=0
pass_count=0
fail_count=0
send_test() {
    # Send one simulated webhook to the local notification endpoint and
    # report PASS/FAIL based on the HTTP status code.
    #
    # Arguments:
    #   $1 - human-readable test name (printed in the progress line)
    #   $2 - JSON payload POSTed to $API
    local name="$1"
    local payload="$2"
    local response http_code body
    test_count=$((test_count + 1))
    echo -e "${CYAN} [$test_count] ${BOLD}$name${NC}"
    response=$(curl -s -w "\n%{http_code}" -X POST "$API" \
        -H "Content-Type: application/json" \
        -d "$payload" 2>&1)
    # curl appends the status code on its own line; split body/status with
    # shell parameter expansion instead of piping through tail/head.
    # ('head -n -1' is a GNU extension and breaks on BSD/macOS.)
    http_code="${response##*$'\n'}"
    body="${response%$'\n'*}"
    if [ "$http_code" = "200" ] || [ "$http_code" = "202" ]; then
        echo -e "    ${GREEN}HTTP $http_code${NC} - $body"
        pass_count=$((pass_count + 1))
    else
        echo -e "    ${RED}HTTP $http_code${NC} - $body"
        fail_count=$((fail_count + 1))
    fi
    sleep "$PAUSE"
}
# ============================================================================
# SYSTEM CATEGORY (group: system)
# ============================================================================
test_system() {
    # Exercise every notification type in the "system" group: health state
    # transitions, shutdown/reboot, service failures and update notices.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} SYSTEM - Startup, shutdown, kernel${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. state_change (disabled by default -- test to verify it does NOT arrive)
    send_test "state_change (should NOT arrive - disabled by default)" \
        '{"type":"state_change","component":"health","severity":"warning","title":"overall changed to WARNING","body":"overall status changed from OK to WARNING."}'
    # 2. new_error
    send_test "new_error" \
        '{"type":"new_error","component":"health","severity":"warning","title":"New WARNING - cpu","body":"CPU usage exceeds 90% for more than 5 minutes","category":"cpu"}'
    # 3. error_resolved
    send_test "error_resolved" \
        '{"type":"error_resolved","component":"health","severity":"info","title":"Resolved - cpu","body":"CPU usage returned to normal.\nDuration: 15 minutes","category":"cpu","duration":"15 minutes"}'
    # 4. error_escalated
    send_test "error_escalated" \
        '{"type":"error_escalated","component":"health","severity":"critical","title":"Escalated to CRITICAL - memory","body":"Memory usage exceeded 95% and swap is active","category":"memory"}'
    # 5. system_shutdown
    send_test "system_shutdown" \
        '{"type":"system_shutdown","component":"system","severity":"warning","title":"System shutting down","body":"The system is shutting down.\nUser initiated shutdown."}'
    # 6. system_reboot
    send_test "system_reboot" \
        '{"type":"system_reboot","component":"system","severity":"warning","title":"System rebooting","body":"The system is rebooting.\nKernel update applied."}'
    # 7. system_problem
    send_test "system_problem" \
        '{"type":"system_problem","component":"system","severity":"critical","title":"System problem detected","body":"Kernel panic: Attempted to kill init! exitcode=0x00000009"}'
    # 8. service_fail
    send_test "service_fail" \
        '{"type":"service_fail","component":"systemd","severity":"warning","title":"Service failed - pvedaemon","body":"Service pvedaemon has failed.\nUnit pvedaemon.service entered failed state.","service_name":"pvedaemon"}'
    # 9. update_available (legacy, superseded by update_summary)
    send_test "update_available" \
        '{"type":"update_available","component":"apt","severity":"info","title":"Updates available","body":"Total updates: 12\nSecurity: 3\nProxmox: 5\nKernel: 1\nImportant: pve-manager (8.3.5 -> 8.4.1)","total_count":"12","security_count":"3","pve_count":"5","kernel_count":"1","important_list":"pve-manager (8.3.5 -> 8.4.1)"}'
    # 10. update_complete
    send_test "update_complete" \
        '{"type":"update_complete","component":"apt","severity":"info","title":"Update completed","body":"12 packages updated successfully."}'
    # 11. unknown_persistent
    send_test "unknown_persistent" \
        '{"type":"unknown_persistent","component":"health","severity":"warning","title":"Check unavailable - temperature","body":"Health check for temperature has been unavailable for 3+ cycles.\nSensor not responding.","category":"temperature"}'
    # 12. health_persistent
    send_test "health_persistent" \
        '{"type":"health_persistent","component":"health","severity":"warning","title":"3 active health issue(s)","body":"The following health issues remain active:\n- CPU at 92%\n- Memory at 88%\n- Disk /dev/sda at 94%\n\nThis digest is sent once every 24 hours while issues persist.","count":"3"}'
    # 13. health_issue_new
    send_test "health_issue_new" \
        '{"type":"health_issue_new","component":"health","severity":"warning","title":"New health issue - disk","body":"New WARNING issue detected:\nDisk /dev/sda usage at 94%","category":"disk"}'
    # 14. health_issue_resolved
    send_test "health_issue_resolved" \
        '{"type":"health_issue_resolved","component":"health","severity":"info","title":"Resolved - disk","body":"disk issue has been resolved.\nDisk usage dropped to 72%.\nDuration: 3 hours","category":"disk","duration":"3 hours"}'
    # 15. update_summary
    send_test "update_summary" \
        '{"type":"update_summary","component":"apt","severity":"info","title":"Updates available","body":"Total updates: 70\nSecurity updates: 9\nProxmox-related updates: 24\nKernel updates: 1\nImportant packages: pve-manager (8.3.5 -> 8.4.1), proxmox-ve (8.3.0 -> 8.4.0), qemu-server (8.3.8 -> 8.4.2)","total_count":"70","security_count":"9","pve_count":"24","kernel_count":"1","important_list":"pve-manager (8.3.5 -> 8.4.1), proxmox-ve (8.3.0 -> 8.4.0), qemu-server (8.3.8 -> 8.4.2)"}'
    # 16. pve_update
    send_test "pve_update" \
        '{"type":"pve_update","component":"apt","severity":"info","title":"Proxmox VE 8.4.1 available","body":"Proxmox VE 8.3.5 -> 8.4.1\npve-manager 8.3.5 -> 8.4.1","current_version":"8.3.5","new_version":"8.4.1","version":"8.4.1","details":"pve-manager 8.3.5 -> 8.4.1"}'
}
# ============================================================================
# VM / CT CATEGORY (group: vm_ct)
# ============================================================================
test_vm_ct() {
    # Exercise the "vm_ct" group: guest lifecycle (start/stop/fail/restart),
    # migration and replication events for both VMs and containers.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} VM / CT - Start, stop, crash, migration${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. vm_start
    send_test "vm_start" \
        '{"type":"vm_start","component":"qemu","severity":"info","title":"VM 100 started","body":"ubuntu-server (100) has been started.","vmid":"100","vmname":"ubuntu-server"}'
    # 2. vm_stop
    send_test "vm_stop" \
        '{"type":"vm_stop","component":"qemu","severity":"info","title":"VM 100 stopped","body":"ubuntu-server (100) has been stopped.","vmid":"100","vmname":"ubuntu-server"}'
    # 3. vm_shutdown
    send_test "vm_shutdown" \
        '{"type":"vm_shutdown","component":"qemu","severity":"info","title":"VM 100 shutdown","body":"ubuntu-server (100) has been shut down.","vmid":"100","vmname":"ubuntu-server"}'
    # 4. vm_fail
    send_test "vm_fail" \
        '{"type":"vm_fail","component":"qemu","severity":"critical","title":"VM 100 FAILED","body":"ubuntu-server (100) has failed.\nKVM: internal error: unexpected exit to hypervisor","vmid":"100","vmname":"ubuntu-server","reason":"KVM: internal error: unexpected exit to hypervisor"}'
    # 5. vm_restart
    send_test "vm_restart" \
        '{"type":"vm_restart","component":"qemu","severity":"info","title":"VM 100 restarted","body":"ubuntu-server (100) has been restarted.","vmid":"100","vmname":"ubuntu-server"}'
    # 6. ct_start
    send_test "ct_start" \
        '{"type":"ct_start","component":"lxc","severity":"info","title":"CT 200 started","body":"nginx-proxy (200) has been started.","vmid":"200","vmname":"nginx-proxy"}'
    # 7. ct_stop
    send_test "ct_stop" \
        '{"type":"ct_stop","component":"lxc","severity":"info","title":"CT 200 stopped","body":"nginx-proxy (200) has been stopped.","vmid":"200","vmname":"nginx-proxy"}'
    # 8. ct_fail
    send_test "ct_fail" \
        '{"type":"ct_fail","component":"lxc","severity":"critical","title":"CT 200 FAILED","body":"nginx-proxy (200) has failed.\nContainer exited with error code 137","vmid":"200","vmname":"nginx-proxy","reason":"Container exited with error code 137"}'
    # 9. migration_start
    send_test "migration_start" \
        '{"type":"migration_start","component":"qemu","severity":"info","title":"Migration started - 100","body":"ubuntu-server (100) migration to pve-node2 started.","vmid":"100","vmname":"ubuntu-server","target_node":"pve-node2"}'
    # 10. migration_complete
    send_test "migration_complete" \
        '{"type":"migration_complete","component":"qemu","severity":"info","title":"Migration complete - 100","body":"ubuntu-server (100) migrated successfully to pve-node2.","vmid":"100","vmname":"ubuntu-server","target_node":"pve-node2"}'
    # 11. migration_fail
    send_test "migration_fail" \
        '{"type":"migration_fail","component":"qemu","severity":"critical","title":"Migration FAILED - 100","body":"ubuntu-server (100) migration to pve-node2 failed.\nNetwork timeout during memory transfer","vmid":"100","vmname":"ubuntu-server","target_node":"pve-node2","reason":"Network timeout during memory transfer"}'
    # 12. replication_fail
    send_test "replication_fail" \
        '{"type":"replication_fail","component":"replication","severity":"critical","title":"Replication FAILED - 100","body":"Replication of ubuntu-server (100) has failed.\nTarget storage unreachable","vmid":"100","vmname":"ubuntu-server","reason":"Target storage unreachable"}'
    # 13. replication_complete
    send_test "replication_complete" \
        '{"type":"replication_complete","component":"replication","severity":"info","title":"Replication complete - 100","body":"Replication of ubuntu-server (100) completed successfully.","vmid":"100","vmname":"ubuntu-server"}'
}
# ============================================================================
# BACKUP CATEGORY (group: backup)
# ============================================================================
test_backup() {
    # Exercise the "backup" group: vzdump backup lifecycle and snapshots.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} BACKUPS - Backup start, complete, fail${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. backup_start
    send_test "backup_start" \
        '{"type":"backup_start","component":"vzdump","severity":"info","title":"Backup started - 100","body":"Backup of ubuntu-server (100) has started.","vmid":"100","vmname":"ubuntu-server"}'
    # 2. backup_complete
    send_test "backup_complete" \
        '{"type":"backup_complete","component":"vzdump","severity":"info","title":"Backup complete - 100","body":"Backup of ubuntu-server (100) completed successfully.\nSize: 12.4 GB","vmid":"100","vmname":"ubuntu-server","size":"12.4 GB"}'
    # 3. backup_fail
    send_test "backup_fail" \
        '{"type":"backup_fail","component":"vzdump","severity":"critical","title":"Backup FAILED - 100","body":"Backup of ubuntu-server (100) has failed.\nStorage local-lvm is full","vmid":"100","vmname":"ubuntu-server","reason":"Storage local-lvm is full"}'
    # 4. snapshot_complete
    send_test "snapshot_complete" \
        '{"type":"snapshot_complete","component":"qemu","severity":"info","title":"Snapshot created - 100","body":"Snapshot of ubuntu-server (100) created: pre-upgrade-2026","vmid":"100","vmname":"ubuntu-server","snapshot_name":"pre-upgrade-2026"}'
    # 5. snapshot_fail
    send_test "snapshot_fail" \
        '{"type":"snapshot_fail","component":"qemu","severity":"critical","title":"Snapshot FAILED - 100","body":"Snapshot of ubuntu-server (100) failed.\nInsufficient space on storage","vmid":"100","vmname":"ubuntu-server","reason":"Insufficient space on storage"}'
}
# ============================================================================
# RESOURCES CATEGORY (group: resources)
# ============================================================================
test_resources() {
    # Exercise the "resources" group: CPU, memory, temperature and load alerts.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} RESOURCES - CPU, memory, temperature${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. cpu_high
    send_test "cpu_high" \
        '{"type":"cpu_high","component":"health","severity":"warning","title":"High CPU usage (94%)","body":"CPU usage is at 94% on 16 cores.\nTop process: kvm (VM 100)","value":"94","cores":"16","details":"Top process: kvm (VM 100)"}'
    # 2. ram_high
    send_test "ram_high" \
        '{"type":"ram_high","component":"health","severity":"warning","title":"High memory usage (91%)","body":"Memory usage: 58.2 GB / 64 GB (91%).\n4 VMs running, swap at 2.1 GB","value":"91","used":"58.2 GB","total":"64 GB","details":"4 VMs running, swap at 2.1 GB"}'
    # 3. temp_high
    send_test "temp_high" \
        '{"type":"temp_high","component":"health","severity":"critical","title":"High temperature (89C)","body":"CPU temperature: 89C (threshold: 80C).\nCheck cooling system immediately","value":"89","threshold":"80","details":"Check cooling system immediately"}'
    # 4. load_high
    send_test "load_high" \
        '{"type":"load_high","component":"health","severity":"warning","title":"High system load (24.5)","body":"System load average: 24.5 on 16 cores.\nI/O wait: 35%","value":"24.5","cores":"16","details":"I/O wait: 35%"}'
}
# ============================================================================
# STORAGE CATEGORY (group: storage)
# ============================================================================
test_storage() {
    # Exercise the "storage" group: low space, I/O errors and burst aggregation.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} STORAGE - Disk space, I/O errors, SMART${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. disk_space_low
    send_test "disk_space_low" \
        '{"type":"disk_space_low","component":"storage","severity":"warning","title":"Low disk space on /var","body":"/var: 93% used (4.2 GB available).","mount":"/var","used":"93","available":"4.2 GB"}'
    # 2. disk_io_error
    send_test "disk_io_error" \
        '{"type":"disk_io_error","component":"smart","severity":"critical","title":"Disk I/O error","body":"I/O error detected on /dev/sdb.\nSMART error: Current Pending Sector Count = 8","device":"/dev/sdb","reason":"SMART error: Current Pending Sector Count = 8"}'
    # 3. burst_disk_io
    send_test "burst_disk_io" \
        '{"type":"burst_disk_io","component":"storage","severity":"critical","title":"5 disk I/O errors on /dev/sdb, /dev/sdc","body":"5 I/O errors detected in 60s.\nDevices: /dev/sdb, /dev/sdc","count":"5","window":"60s","entity_list":"/dev/sdb, /dev/sdc"}'
}
# ============================================================================
# NETWORK CATEGORY (group: network)
# ============================================================================
test_network() {
    # Exercise the "network" group: connectivity loss and latency alerts.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} NETWORK - Connectivity, bond, latency${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. network_down
    send_test "network_down" \
        '{"type":"network_down","component":"network","severity":"critical","title":"Network connectivity lost","body":"Network connectivity check failed.\nGateway 192.168.1.1 unreachable. Bond vmbr0 degraded.","reason":"Gateway 192.168.1.1 unreachable. Bond vmbr0 degraded."}'
    # 2. network_latency
    send_test "network_latency" \
        '{"type":"network_latency","component":"network","severity":"warning","title":"High network latency (450ms)","body":"Latency to gateway: 450ms (threshold: 100ms).","value":"450","threshold":"100"}'
}
# ============================================================================
# SECURITY CATEGORY (group: security)
# ============================================================================
test_security() {
    # Exercise the "security" group: auth failures, Fail2Ban bans, firewall
    # issues, permission changes and the burst-aggregated variants.
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} SECURITY - Auth failures, fail2ban, firewall${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. auth_fail
    send_test "auth_fail" \
        '{"type":"auth_fail","component":"auth","severity":"warning","title":"Authentication failure","body":"Failed login attempt from 203.0.113.42.\nUser: root\nService: sshd","source_ip":"203.0.113.42","username":"root","service":"sshd"}'
    # 2. ip_block
    send_test "ip_block" \
        '{"type":"ip_block","component":"security","severity":"info","title":"IP blocked by Fail2Ban","body":"IP 203.0.113.42 has been banned.\nJail: sshd\nFailures: 5","source_ip":"203.0.113.42","jail":"sshd","failures":"5"}'
    # 3. firewall_issue
    send_test "firewall_issue" \
        '{"type":"firewall_issue","component":"firewall","severity":"warning","title":"Firewall issue detected","body":"Firewall rule conflict detected on vmbr0.\nRule 15 overlaps with rule 23, potentially blocking cluster traffic.","reason":"Firewall rule conflict detected on vmbr0. Rule 15 overlaps with rule 23."}'
    # 4. user_permission_change
    send_test "user_permission_change" \
        '{"type":"user_permission_change","component":"auth","severity":"info","title":"User permission changed","body":"User: admin@pam\nChange: Added PVEAdmin role on /vms/100","username":"admin@pam","change_details":"Added PVEAdmin role on /vms/100"}'
    # 5. burst_auth_fail
    send_test "burst_auth_fail" \
        '{"type":"burst_auth_fail","component":"security","severity":"warning","title":"8 auth failures in 2m","body":"8 authentication failures detected in 2m.\nSources: 203.0.113.42, 198.51.100.7, 192.0.2.15","count":"8","window":"2m","entity_list":"203.0.113.42, 198.51.100.7, 192.0.2.15"}'
    # 6. burst_ip_block
    send_test "burst_ip_block" \
        '{"type":"burst_ip_block","component":"security","severity":"info","title":"Fail2Ban banned 4 IPs in 5m","body":"4 IPs banned by Fail2Ban in 5m.\nIPs: 203.0.113.42, 198.51.100.7, 192.0.2.15, 10.0.0.99","count":"4","window":"5m","entity_list":"203.0.113.42, 198.51.100.7, 192.0.2.15, 10.0.0.99"}'
}
# ============================================================================
# CLUSTER CATEGORY (group: cluster)
# ============================================================================
# Fires one synthetic event per cluster notification type (split-brain,
# node disconnect/reconnect, flapping burst) through send_test so every
# template/severity in the "cluster" group can be verified end to end.
test_cluster() {
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} CLUSTER - Quorum, split-brain, HA fencing${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    # 1. split_brain — critical: quorum lost, cluster partitioned
    send_test "split_brain" \
        '{"type":"split_brain","component":"cluster","severity":"critical","title":"SPLIT-BRAIN detected","body":"Cluster split-brain condition detected.\nQuorum status: No quorum - 1/3 nodes visible","quorum":"No quorum - 1/3 nodes visible"}'
    # 2. node_disconnect — critical: a node dropped out of corosync
    send_test "node_disconnect" \
        '{"type":"node_disconnect","component":"corosync","severity":"critical","title":"Node disconnected","body":"Node pve-node3 has disconnected from the cluster.","node_name":"pve-node3"}'
    # 3. node_reconnect — info: the node came back
    send_test "node_reconnect" \
        '{"type":"node_reconnect","component":"corosync","severity":"info","title":"Node reconnected","body":"Node pve-node3 has reconnected to the cluster.","node_name":"pve-node3"}'
    # 4. burst_cluster — critical: aggregated flapping summary
    send_test "burst_cluster" \
        '{"type":"burst_cluster","component":"cluster","severity":"critical","title":"Cluster flapping detected (6 changes)","body":"Cluster state changed 6 times in 5m.\nNodes: pve-node2, pve-node3","count":"6","window":"5m","entity_list":"pve-node2, pve-node3"}'
}
# ============================================================================
# BURST AGGREGATION TESTS (send rapid events to trigger burst detection)
# ============================================================================
# Sends rapid sequences of raw events (auth_fail, disk_io_error,
# node_disconnect) straight at the API so the server-side burst aggregator
# should fold them into burst_* notifications. Waits ~10s after each batch
# so the aggregation window can close.
test_burst() {
    echo ""
    echo -e "${YELLOW}========================================${NC}"
    echo -e "${YELLOW} BURST - Rapid events to trigger aggregation${NC}"
    echo -e "${YELLOW}========================================${NC}"
    echo ""
    local n payload

    echo -e "${BLUE} Sending 5 rapid auth_fail events (should trigger burst_auth_fail)...${NC}"
    for ((n = 1; n <= 5; n++)); do
        payload="{\"type\":\"auth_fail\",\"component\":\"auth\",\"severity\":\"warning\",\"title\":\"Auth fail from 10.0.0.$n\",\"body\":\"Failed login from 10.0.0.$n\",\"source_ip\":\"10.0.0.$n\"}"
        curl -s -X POST "$API" -H "Content-Type: application/json" -d "$payload" > /dev/null
        echo -e " ${CYAN}Sent auth_fail $n/5${NC}"
        sleep 0.5
    done
    echo -e " ${GREEN}Done. Wait ~10s for burst aggregation...${NC}"
    sleep 10
    echo ""

    echo -e "${BLUE} Sending 4 rapid disk_io_error events (should trigger burst_disk_io)...${NC}"
    for ((n = 1; n <= 4; n++)); do
        payload="{\"type\":\"disk_io_error\",\"component\":\"smart\",\"severity\":\"critical\",\"title\":\"I/O error on /dev/sd${n}\",\"body\":\"Error on device\",\"device\":\"/dev/sd${n}\"}"
        curl -s -X POST "$API" -H "Content-Type: application/json" -d "$payload" > /dev/null
        echo -e " ${CYAN}Sent disk_io_error $n/4${NC}"
        sleep 0.5
    done
    echo -e " ${GREEN}Done. Wait ~10s for burst aggregation...${NC}"
    sleep 10
    echo ""

    echo -e "${BLUE} Sending 3 rapid node_disconnect events (should trigger burst_cluster)...${NC}"
    for ((n = 1; n <= 3; n++)); do
        payload="{\"type\":\"node_disconnect\",\"component\":\"corosync\",\"severity\":\"critical\",\"title\":\"Node pve-node$n disconnected\",\"body\":\"Node lost\",\"node_name\":\"pve-node$n\"}"
        curl -s -X POST "$API" -H "Content-Type: application/json" -d "$payload" > /dev/null
        echo -e " ${CYAN}Sent node_disconnect $n/3${NC}"
        sleep 0.5
    done
    echo -e " ${GREEN}Done. Wait ~10s for burst aggregation...${NC}"
    sleep 10
}
# ============================================================================
# MAIN
# ============================================================================
echo ""
echo -e "${BOLD}============================================================${NC}"
echo -e "${BOLD} ProxMenux Notification System - Complete Test Suite${NC}"
echo -e "${BOLD}============================================================${NC}"
echo -e " API: $API"
echo -e " Pause: ${PAUSE}s between tests"
echo ""
# Check that the service is reachable before sending anything
status=$(curl -s -o /dev/null -w "%{http_code}" "http://127.0.0.1:8008/api/notifications/status" 2>/dev/null)
if [ "$status" != "200" ]; then
    echo -e "${RED}ERROR: Notification service not reachable (HTTP $status)${NC}"
    echo -e " Make sure ProxMenux Monitor is running."
    exit 1
fi
echo -e "${GREEN}Service is reachable.${NC}"
# Parse argument: a single category name, or "all" (default)
category="${1:-all}"
case "$category" in
    system) test_system ;;
    vm_ct) test_vm_ct ;;
    backup) test_backup ;;
    resources) test_resources ;;
    storage) test_storage ;;
    network) test_network ;;
    security) test_security ;;
    cluster) test_cluster ;;
    burst) test_burst ;;
    all)
        test_system
        test_vm_ct
        test_backup
        test_resources
        test_storage
        test_network
        test_security
        test_cluster
        test_burst
        ;;
    *)
        echo -e "${RED}Unknown category: $category${NC}"
        echo "Usage: $0 [system|vm_ct|backup|resources|storage|network|security|cluster|burst|all]"
        exit 1
        ;;
esac
# ============================================================================
# SUMMARY
# ============================================================================
echo ""
echo -e "${BOLD}============================================================${NC}"
echo -e "${BOLD} SUMMARY${NC}"
echo -e "${BOLD}============================================================${NC}"
echo -e " Total tests: $test_count"
echo -e " ${GREEN}Accepted:${NC} $pass_count"
echo -e " ${RED}Rejected:${NC} $fail_count"
echo ""
echo -e " Check your notification channels for the messages."
echo -e " Note: Some events may be filtered by your current settings"
echo -e " (severity filter, disabled categories, disabled individual events)."
echo ""
echo -e " To check notification history (all events):"
echo -e " ${CYAN}curl -s 'http://127.0.0.1:8008/api/notifications/history?limit=200' | python3 -m json.tool${NC}"
echo ""
echo -e " To count events by type:"
# FIX: this echo string was missing its closing double quote, which made the
# shell swallow the following lines into the argument (syntax/behavior bug).
echo -e " ${CYAN}curl -s 'http://127.0.0.1:8008/api/notifications/history?limit=200' | python3 -c \"import sys,json; h=json.load(sys.stdin)['history']; [print(f' {t}: {c}') for t,c in sorted(dict((e['event_type'],sum(1 for x in h if x['event_type']==e['event_type'])) for e in h).items())]\"${NC}"
echo ""
+131
View File
@@ -0,0 +1,131 @@
#!/usr/bin/env python3
"""
Test script to simulate a disk error and verify observation recording.
Usage: python3 test_disk_observation.py [device_name] [error_type]
Examples:
    python3 test_disk_observation.py sdh io_error
    python3 test_disk_observation.py sdh smart_error
    python3 test_disk_observation.py sdh fs_error
"""
import sys
import os

# Add possible module locations to path so the import below works whether we
# run from the repo checkout, an installed copy, or the AppImage mount.
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, script_dir)
sys.path.insert(0, '/usr/local/share/proxmenux')
sys.path.insert(0, '/tmp/.mount_ProxMeztyU13/usr/bin')  # AppImage mount point

# Report where the module was found. FIX: previously this loop was silent
# when the module was missing; warn up front so the ImportError that follows
# is easy to diagnose.
for path in sys.path:
    if os.path.exists(os.path.join(path, 'health_persistence.py')):
        print(f"[INFO] Found health_persistence.py in: {path}")
        break
else:
    print("[WARN] health_persistence.py not found on sys.path; import below may fail")

from health_persistence import HealthPersistence
from datetime import datetime
def main():
    """Record one synthetic disk-error observation and read it back.

    Steps: (1) record the observation via HealthPersistence,
    (2) query observations for the device, (3) dump the matching entry
    from the observed-device registry. Output goes to stdout for manual
    inspection against the Storage section of the UI.
    """
    # CLI args: device name (default 'sdh') and error type (default 'io_error')
    device_name = sys.argv[1] if len(sys.argv) > 1 else 'sdh'
    error_type = sys.argv[2] if len(sys.argv) > 2 else 'io_error'
    # Known serial for sdh (WDC 2TB)
    # NOTE(review): serials below are hard-coded for one specific test host;
    # any other device name falls back to serial=None.
    serial_map = {
        'sdh': 'WD-WX72A30AA72R',
        'nvme0n1': '2241E675EA6C',
        'nvme1n1': '2241E675EBE6',
        'sda': '22440F443504',
        'sdb': 'WWZ1SJ18',
        'sdc': '52X0A0D9FZ1G',
        'sdd': '50026B7784446E63',
        'sde': '22440F442105',
        'sdf': 'WRQ0X2GP',
        'sdg': '23Q0A0MPFZ1G',
    }
    serial = serial_map.get(device_name, None)
    # Error messages by type (realistic kernel/smartd-style wording)
    error_messages = {
        'io_error': f'Test I/O error on /dev/{device_name}: sector read failed at LBA 12345678',
        'smart_error': f'/dev/{device_name}: SMART warning - 1 Currently unreadable (pending) sectors detected',
        'fs_error': f'EXT4-fs error (device {device_name}1): inode 123456: block 789012: error reading data',
    }
    # Stable per-device signatures so repeated runs dedupe into one row
    error_signatures = {
        'io_error': f'io_test_{device_name}',
        'smart_error': f'smart_test_{device_name}',
        'fs_error': f'fs_test_{device_name}',
    }
    message = error_messages.get(error_type, f'Test error on /dev/{device_name}')
    signature = error_signatures.get(error_type, f'test_{device_name}')
    print(f"\n{'='*60}")
    print(f"Testing Disk Observation Recording")
    print(f"{'='*60}")
    print(f"Device: /dev/{device_name}")
    print(f"Serial: {serial or 'Unknown'}")
    print(f"Error Type: {error_type}")
    print(f"Message: {message}")
    print(f"Signature: {signature}")
    print(f"{'='*60}\n")
    # Initialize persistence
    hp = HealthPersistence()
    # [1] Record the observation
    print("[1] Recording observation...")
    hp.record_disk_observation(
        device_name=device_name,
        serial=serial,
        error_type=error_type,
        error_signature=signature,
        raw_message=message,
        severity='warning'
    )
    print(" OK - Observation recorded\n")
    # [2] Query observations back for this device to prove persistence worked
    print("[2] Querying observations for this device...")
    observations = hp.get_disk_observations(device_name=device_name, serial=serial)
    if observations:
        print(f" Found {len(observations)} observation(s):\n")
        for obs in observations:
            print(f" ID: {obs['id']}")
            print(f" Type: {obs['error_type']}")
            print(f" Signature: {obs['error_signature']}")
            print(f" Message: {obs['raw_message'][:80]}...")
            print(f" Severity: {obs['severity']}")
            print(f" First: {obs['first_occurrence']}")
            print(f" Last: {obs['last_occurrence']}")
            print(f" Count: {obs['occurrence_count']}")
            print(f" Dismissed: {obs['dismissed']}")
            print()
    else:
        print(" No observations found!\n")
    # [3] Also show the matching entry in the disk registry
    print("[3] Checking disk registry...")
    all_devices = hp.get_all_observed_devices()
    for dev in all_devices:
        if dev.get('device_name') == device_name or dev.get('serial') == serial:
            print(f" Found in registry:")
            print(f" ID: {dev.get('id')}")
            print(f" Device: {dev.get('device_name')}")
            print(f" Serial: {dev.get('serial')}")
            print(f" First seen: {dev.get('first_seen')}")
            print(f" Last seen: {dev.get('last_seen')}")
            print()
    print(f"{'='*60}")
    print("Test complete! Check the Storage section in the UI.")
    print(f"The disk /dev/{device_name} should now show an observations badge.")
    print(f"{'='*60}\n")


if __name__ == '__main__':
    main()
+732
View File
@@ -0,0 +1,732 @@
#!/bin/bash
# ============================================================================
# ProxMenux - Real Proxmox Event Simulator
# ============================================================================
# This script triggers ACTUAL events on Proxmox so that PVE's notification
# system fires real webhooks through the full pipeline:
#
# PVE event -> PVE notification -> webhook POST -> our pipeline -> Telegram
#
# Unlike test_all_notifications.sh (which injects directly via API), this
# script makes Proxmox generate the events itself.
#
# Usage:
# chmod +x test_real_events.sh
# ./test_real_events.sh # interactive menu
# ./test_real_events.sh disk # run disk tests only
# ./test_real_events.sh backup # run backup tests only
# ./test_real_events.sh all # run all tests
# ============================================================================
# Abort on errors, unset variables and pipeline failures.
set -euo pipefail

# ANSI color codes used by all console output (interpreted by echo -e)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# ProxMenux Monitor API base URL and a unique per-run log file
API="http://127.0.0.1:8008"
LOG_FILE="/tmp/proxmenux_real_test_$(date +%Y%m%d_%H%M%S).log"
# ── Helpers ─────────────────────────────────────────────────────

# Echo a (color-coded) line and append it to the run log.
log() { echo -e "$1" | tee -a "$LOG_FILE"; }

# Bold section banner, also logged.
header() {
    echo "" | tee -a "$LOG_FILE"
    echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" | tee -a "$LOG_FILE"
    echo -e "${BOLD} $1${NC}" | tee -a "$LOG_FILE"
    echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" | tee -a "$LOG_FILE"
}

# Severity-tagged output helpers (all route through log)
warn() { log "${YELLOW} [!] $1${NC}"; }
ok() { log "${GREEN} [OK] $1${NC}"; }
fail() { log "${RED} [FAIL] $1${NC}"; }
info() { log "${CYAN} [i] $1${NC}"; }

# Prompt "$1"; succeeds on plain Enter or y/Y (default is yes).
confirm() {
    echo ""
    echo -e "${YELLOW} $1${NC}"
    echo -ne " Continue? [Y/n]: "
    read -r ans
    [[ -z "$ans" || "$ans" =~ ^[Yy] ]]
}

# Sleep $1 seconds (default 10) to give PVE time to deliver the webhook.
wait_webhook() {
    local seconds=${1:-10}
    log " Waiting ${seconds}s for webhook delivery..."
    sleep "$seconds"
}

# Print the current number of entries in the notification history;
# prints 0 on any curl/JSON error so callers can always do arithmetic.
snapshot_history() {
    curl -s "${API}/api/notifications/history?limit=200" 2>/dev/null | python3 -c "
import sys, json
try:
    data = json.load(sys.stdin)
    count = len(data.get('history', []))
    print(count)
except:
    print(0)
" 2>/dev/null || echo "0"
}
# Compare the current history count against a previous snapshot and print
# any new notifications. $1 = the count returned by snapshot_history
# before the test ran.
check_new_events() {
    local before=$1
    local after
    after=$(snapshot_history)
    local diff=$((after - before))
    if [ "$diff" -gt 0 ]; then
        ok "Received $diff new notification(s) via webhook"
        # Show the latest events (fetch a couple extra; slice to $diff)
        curl -s "${API}/api/notifications/history?limit=$((diff + 2))" 2>/dev/null | python3 -c "
import sys, json
data = json.load(sys.stdin)
for h in data.get('history', [])[:$diff]:
    sev = h.get('severity', '?')
    icon = {'CRITICAL': ' RED', 'WARNING': ' YEL', 'INFO': ' BLU'}.get(sev, ' ???')
    print(f'{icon} {h[\"event_type\"]:25s} {h.get(\"title\", \"\")[:60]}')
" 2>/dev/null | tee -a "$LOG_FILE"
    else
        warn "No new notifications detected (may need more time or check filters)"
    fi
}
# ── Pre-flight checks ──────────────────────────────────────────
# Validate the environment before running any tests: root, ProxMenux Monitor
# reachable, PVE webhook + notifications configured. Also selects a stopped
# VM/CT for later tests and exports it via the globals VMID/VMNAME/VMTYPE
# (empty VMID means "none found"). Exits on hard failures; soft failures
# ask the user whether to continue.
preflight() {
    header "Pre-flight Checks"
    # Check if running as root (required for vzdump/qm/pct/pvesh)
    if [ "$(id -u)" -ne 0 ]; then
        fail "This script must be run as root"
        exit 1
    fi
    ok "Running as root"
    # Check ProxMenux is running
    if curl -s "${API}/api/health" >/dev/null 2>&1; then
        ok "ProxMenux Monitor is running"
    else
        fail "ProxMenux Monitor not reachable at ${API}"
        exit 1
    fi
    # Check webhook is configured by querying PVE directly
    if pvesh get /cluster/notifications/endpoints/webhook --output-format json 2>/dev/null | python3 -c "
import sys, json
endpoints = json.load(sys.stdin)
found = any('proxmenux' in e.get('name','').lower() for e in (endpoints if isinstance(endpoints, list) else [endpoints]))
exit(0 if found else 1)
" 2>/dev/null; then
        ok "PVE webhook endpoint 'proxmenux-webhook' is configured"
    else
        warn "PVE webhook may not be configured. Run setup from the UI first."
        if ! confirm "Continue anyway?"; then
            exit 1
        fi
    fi
    # Check notification config
    # API returns { config: { enabled: true/false/'true'/'false', ... }, success: true }
    if curl -s "${API}/api/notifications/settings" 2>/dev/null | python3 -c "
import sys, json
d = json.load(sys.stdin)
cfg = d.get('config', d)
enabled = cfg.get('enabled', False)
exit(0 if enabled is True or str(enabled).lower() == 'true' else 1)
" 2>/dev/null; then
        ok "Notifications are enabled"
    else
        fail "Notifications are NOT enabled. Enable them in the UI first."
        exit 1
    fi
    # Re-run webhook setup to ensure priv config and body template exist
    info "Re-configuring PVE webhook (ensures priv config + body template)..."
    local setup_result
    setup_result=$(curl -s -X POST "${API}/api/notifications/proxmox/setup-webhook" 2>/dev/null)
    if echo "$setup_result" | python3 -c "import sys,json; d=json.load(sys.stdin); exit(0 if d.get('configured') else 1)" 2>/dev/null; then
        ok "PVE webhook re-configured successfully"
    else
        local setup_err
        setup_err=$(echo "$setup_result" | python3 -c "import sys,json; print(json.load(sys.stdin).get('error','unknown'))" 2>/dev/null)
        warn "Webhook setup returned: ${setup_err}"
        warn "PVE webhook events may not work. Manual commands below:"
        echo "$setup_result" | python3 -c "
import sys, json
d = json.load(sys.stdin)
for cmd in d.get('fallback_commands', []):
    print(f' {cmd}')
" 2>/dev/null
        if ! confirm "Continue anyway?"; then
            exit 1
        fi
    fi
    # Find a VM/CT for testing (globals consumed by test_backup/test_vmct)
    VMID=""
    VMNAME=""
    VMTYPE=""
    # Try to find a stopped CT first (safest)
    local cts
    cts=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null || echo "[]")
    # Look for a stopped container
    VMID=$(echo "$cts" | python3 -c "
import sys, json
vms = json.load(sys.stdin)
# Prefer stopped CTs, then stopped VMs
for v in sorted(vms, key=lambda x: (0 if x.get('type')=='lxc' else 1, 0 if x.get('status')=='stopped' else 1)):
    if v.get('status') == 'stopped':
        print(v.get('vmid', ''))
        break
" 2>/dev/null || echo "")
    if [ -n "$VMID" ]; then
        # Resolve type ('lxc' vs 'qemu') and display name for the chosen guest
        VMTYPE=$(echo "$cts" | python3 -c "
import sys, json
vms = json.load(sys.stdin)
for v in vms:
    if str(v.get('vmid')) == '$VMID':
        print(v.get('type', 'qemu'))
        break
" 2>/dev/null)
        VMNAME=$(echo "$cts" | python3 -c "
import sys, json
vms = json.load(sys.stdin)
for v in vms:
    if str(v.get('vmid')) == '$VMID':
        print(v.get('name', 'unknown'))
        break
" 2>/dev/null)
        ok "Found stopped ${VMTYPE} for testing: ${VMID} (${VMNAME})"
    else
        warn "No stopped VM/CT found. Backup tests will use ID 0 (host backup)."
    fi
    # List available storage (informational only)
    info "Available storage:"
    pvesh get /storage --output-format json 2>/dev/null | python3 -c "
import sys, json
stores = json.load(sys.stdin)
for s in stores:
    sid = s.get('storage', '?')
    stype = s.get('type', '?')
    content = s.get('content', '?')
    print(f' {sid:20s} type={stype:10s} content={content}')
" 2>/dev/null | tee -a "$LOG_FILE" || warn "Could not list storage"
    echo ""
    log " Log file: ${LOG_FILE}"
}
# ============================================================================
# TEST CATEGORY: DISK ERRORS
# ============================================================================
# Disk-error tests. D1/D2/D4 inject realistic error lines into the journal
# with logger(1) — no disks are touched. D3 optionally creates a large temp
# file on / to trigger the Health Monitor's low-space alert (interactive,
# cleaned up afterwards).
test_disk() {
    header "DISK ERROR TESTS"
    # ── Test D1: SMART error injection ──
    log ""
    log "${BOLD} Test D1: SMART error log injection${NC}"
    info "Writes a simulated SMART error to syslog so JournalWatcher catches it."
    info "This tests the journal -> notification_events -> pipeline flow."
    local before
    before=$(snapshot_history)
    # Inject a realistic SMART error into the system journal
    logger -t kernel -p kern.err "ata1.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x6 frozen"
    sleep 1
    logger -t kernel -p kern.crit "ata1.00: failed command: READ FPDMA QUEUED"
    sleep 1
    logger -t smartd -p daemon.warning "Device: /dev/sda [SAT], 1 Currently unreadable (pending) sectors"
    wait_webhook 8
    check_new_events "$before"
    # ── Test D2: ZFS error simulation ──
    log ""
    log "${BOLD} Test D2: ZFS scrub error simulation${NC}"
    # Check if ZFS is available; skip gracefully otherwise
    if command -v zpool >/dev/null 2>&1; then
        local zpools
        zpools=$(zpool list -H -o name 2>/dev/null || echo "")
        if [ -n "$zpools" ]; then
            local pool
            pool=$(echo "$zpools" | head -1)
            info "ZFS pool found: ${pool}"
            info "Injecting ZFS checksum error into syslog (non-destructive)."
            before=$(snapshot_history)
            # Simulate ZFS error events via syslog (non-destructive)
            logger -t kernel -p kern.warning "ZFS: pool '${pool}' has experienced an error"
            sleep 1
            logger -t zfs-module -p daemon.err "CHECKSUM error on ${pool}:mirror-0/sda: zio error"
            wait_webhook 8
            check_new_events "$before"
        else
            warn "ZFS installed but no pools found. Skipping ZFS test."
        fi
    else
        warn "ZFS not installed. Skipping ZFS test."
    fi
    # ── Test D3: Filesystem space pressure ──
    log ""
    log "${BOLD} Test D3: Disk space pressure simulation${NC}"
    info "Creates a large temporary file to fill disk, triggering space warnings."
    info "The Health Monitor should detect low disk space within ~60s."
    # Check current free space on /
    local free_pct
    free_pct=$(df / | tail -1 | awk '{print 100-$5}' | tr -d '%')
    info "Current free space on /: ${free_pct}%"
    if [ "$free_pct" -gt 15 ]; then
        info "Disk has ${free_pct}% free. Need to reduce below threshold for test."
        # Calculate how much to fill (leave only 8% free)
        local total_k free_k fill_k
        total_k=$(df / | tail -1 | awk '{print $2}')
        free_k=$(df / | tail -1 | awk '{print $4}')
        fill_k=$((free_k - (total_k * 8 / 100)))
        # Safety cap: refuse to create files above ~50GB
        if [ "$fill_k" -gt 0 ] && [ "$fill_k" -lt 50000000 ]; then
            info "Will create ${fill_k}KB temp file to simulate low space."
            if confirm "This will temporarily fill disk to ~92% on /. Safe to proceed?"; then
                before=$(snapshot_history)
                dd if=/dev/zero of=/tmp/.proxmenux_disk_test bs=1024 count="$fill_k" 2>/dev/null || true
                ok "Temp file created. Disk pressure active."
                info "Waiting 90s for Health Monitor to detect low space..."
                # Wait for health monitor polling cycle
                for i in $(seq 1 9); do
                    echo -ne "\r Waiting... ${i}0/90s"
                    sleep 10
                done
                echo ""
                # Clean up immediately
                rm -f /tmp/.proxmenux_disk_test
                ok "Temp file removed. Disk space restored."
                check_new_events "$before"
            else
                warn "Skipped disk pressure test."
            fi
        else
            warn "Cannot safely fill disk (would need ${fill_k}KB). Skipping."
        fi
    else
        warn "Disk already at ${free_pct}% free. Health Monitor may already be alerting."
    fi
    # ── Test D4: I/O error in syslog ──
    log ""
    log "${BOLD} Test D4: Generic I/O error injection${NC}"
    info "Injects I/O errors into syslog for JournalWatcher."
    before=$(snapshot_history)
    logger -t kernel -p kern.err "Buffer I/O error on dev sdb1, logical block 0, async page read"
    sleep 1
    logger -t kernel -p kern.err "EXT4-fs error (device sdb1): ext4_find_entry:1455: inode #2: comm ls: reading directory lblock 0"
    wait_webhook 8
    check_new_events "$before"
}
# ============================================================================
# TEST CATEGORY: BACKUP EVENTS
# ============================================================================
# Backup-related notification tests using REAL PVE operations:
#   B1: successful vzdump of the guest found by preflight()
#   B2: intentionally failing vzdump (non-existent storage)
#   B3: snapshot create/delete (qm for VMs, pct for CTs)
#   B4: PVE's own notification test endpoint
# Consumes the globals VMID/VMNAME/VMTYPE set by preflight().
test_backup() {
    header "BACKUP EVENT TESTS"
    local backup_storage=""
    # Find backup-capable storage (falls back to 'local')
    backup_storage=$(pvesh get /storage --output-format json 2>/dev/null | python3 -c "
import sys, json
stores = json.load(sys.stdin)
for s in stores:
    content = s.get('content', '')
    if 'backup' in content or 'vztmpl' in content:
        print(s.get('storage', ''))
        break
# Fallback: try 'local'
else:
    for s in stores:
        if s.get('storage') == 'local':
            print('local')
            break
" 2>/dev/null || echo "local")
    info "Using backup storage: ${backup_storage}"
    # ── Test B1: Successful vzdump backup ──
    if [ -n "$VMID" ]; then
        log ""
        log "${BOLD} Test B1: Real vzdump backup (success)${NC}"
        info "Running a real vzdump backup of ${VMTYPE} ${VMID} (${VMNAME})."
        info "This triggers PVE's notification system with a real backup event."
        if confirm "This will backup ${VMTYPE} ${VMID} to '${backup_storage}'. Proceed?"; then
            local before
            before=$(snapshot_history)
            # FIX: create the timestamp marker BEFORE the backup runs so the
            # cleanup below can locate (and actually delete) only the files
            # this test created. Previously the marker was touched after the
            # find, so the test backup was never removed.
            touch /tmp/.proxmenux_bak_marker 2>/dev/null || true
            # Use snapshot mode for VMs (non-disruptive), suspend mode for CTs
            local bmode="snapshot"
            if [ "$VMTYPE" = "lxc" ]; then
                bmode="suspend"
            fi
            info "Starting vzdump (mode=${bmode}, compress=zstd)..."
            if vzdump "$VMID" --storage "$backup_storage" --mode "$bmode" --compress zstd --notes-template "ProxMenux test backup" 2>&1 | tee -a "$LOG_FILE"; then
                ok "vzdump completed successfully!"
            else
                warn "vzdump returned non-zero (check output above)"
            fi
            wait_webhook 12
            check_new_events "$before"
            # Clean up backup file(s) created after the marker.
            # NOTE(review): assumes a dir-backed storage dumping under
            # /var/lib/vz/dump; other storage types are left untouched.
            info "Cleaning up test backup file..."
            local bak
            while IFS= read -r bak; do
                if [ -n "$bak" ]; then
                    rm -f "$bak"
                    info "Removed $(basename "$bak")"
                fi
            done < <(find "/var/lib/vz/dump/" -name "vzdump-*-${VMID}-*" -type f -newer /tmp/.proxmenux_bak_marker 2>/dev/null)
            rm -f /tmp/.proxmenux_bak_marker
        else
            warn "Skipped backup success test."
        fi
        # ── Test B2: Failed vzdump backup ──
        log ""
        log "${BOLD} Test B2: vzdump backup failure (invalid storage)${NC}"
        info "Attempting backup to non-existent storage to trigger a backup failure event."
        before=$(snapshot_history)
        # This WILL fail because the storage doesn't exist
        info "Starting vzdump to fake storage (will fail intentionally)..."
        vzdump "$VMID" --storage "nonexistent_storage_12345" --mode snapshot 2>&1 | tail -5 | tee -a "$LOG_FILE" || true
        warn "vzdump failed as expected (this is intentional)."
        wait_webhook 12
        check_new_events "$before"
    else
        warn "No VM/CT available for backup tests."
        info "You can create a minimal LXC container for testing:"
        info " pct create 9999 local:vztmpl/debian-12-standard_12.2-1_amd64.tar.zst --storage local-lvm --memory 128 --cores 1"
    fi
    # ── Test B3: Snapshot create/delete ──
    if [ -n "$VMID" ] && [ "$VMTYPE" = "qemu" ]; then
        log ""
        log "${BOLD} Test B3: VM Snapshot create & delete${NC}"
        info "Creating a snapshot of VM ${VMID} to test snapshot events."
        if confirm "Create snapshot 'proxmenux_test' on VM ${VMID}?"; then
            local before
            before=$(snapshot_history)
            if qm snapshot "$VMID" proxmenux_test --description "ProxMenux test snapshot" 2>&1 | tee -a "$LOG_FILE"; then
                ok "Snapshot created!"
            else
                warn "Snapshot creation returned non-zero"
            fi
            wait_webhook 10
            check_new_events "$before"
            # Clean up snapshot
            info "Cleaning up test snapshot..."
            qm delsnapshot "$VMID" proxmenux_test 2>/dev/null || true
            ok "Snapshot removed."
        fi
    elif [ -n "$VMID" ] && [ "$VMTYPE" = "lxc" ]; then
        log ""
        log "${BOLD} Test B3: CT Snapshot create & delete${NC}"
        info "Creating a snapshot of CT ${VMID}."
        if confirm "Create snapshot 'proxmenux_test' on CT ${VMID}?"; then
            local before
            before=$(snapshot_history)
            if pct snapshot "$VMID" proxmenux_test --description "ProxMenux test snapshot" 2>&1 | tee -a "$LOG_FILE"; then
                ok "Snapshot created!"
            else
                warn "Snapshot creation returned non-zero"
            fi
            wait_webhook 10
            check_new_events "$before"
            # Clean up
            info "Cleaning up test snapshot..."
            pct delsnapshot "$VMID" proxmenux_test 2>/dev/null || true
            ok "Snapshot removed."
        fi
    fi
    # ── Test B4: PVE scheduled backup notification ──
    log ""
    log "${BOLD} Test B4: Trigger PVE notification system directly${NC}"
    info "Using 'pvesh create /notifications/endpoints/...' to test PVE's own system."
    info "This sends a test notification through PVE, which should hit our webhook."
    local before
    before=$(snapshot_history)
    # PVE 8.x has a test endpoint for notifications
    if pvesh create /notifications/targets/test --target proxmenux-webhook 2>&1 | tee -a "$LOG_FILE"; then
        ok "PVE test notification sent!"
    else
        # Try alternative method
        info "Direct test not available. Trying via API..."
        pvesh set /notifications/endpoints/webhook/proxmenux-webhook --test 1 2>/dev/null || \
            warn "Could not send PVE test notification (requires PVE 8.1+)"
    fi
    wait_webhook 8
    check_new_events "$before"
}
# ============================================================================
# TEST CATEGORY: VM/CT LIFECYCLE
# ============================================================================
# Starts then stops the stopped VM/CT selected by preflight() to generate
# real lifecycle notifications. Prints guidance and returns early when no
# candidate guest is available; V2 (stop) only runs if V1 was confirmed.
test_vmct() {
    header "VM/CT LIFECYCLE TESTS"
    if [ -z "$VMID" ]; then
        warn "No stopped VM/CT found for lifecycle tests."
        info "Create a minimal CT: pct create 9999 local:vztmpl/debian-12-standard_12.2-1_amd64.tar.zst --storage local-lvm --memory 128 --cores 1"
        return
    fi
    log ""
    log "${BOLD} Test V1: Start ${VMTYPE} ${VMID} (${VMNAME})${NC}"
    if confirm "Start ${VMTYPE} ${VMID}? It will be stopped again after the test."; then
        local before
        before=$(snapshot_history)
        # pct for containers, qm for VMs
        if [ "$VMTYPE" = "lxc" ]; then
            pct start "$VMID" 2>&1 | tee -a "$LOG_FILE" || true
        else
            qm start "$VMID" 2>&1 | tee -a "$LOG_FILE" || true
        fi
        ok "Start command sent."
        wait_webhook 10
        check_new_events "$before"
        # Wait a moment before stopping again
        sleep 5
        # ── Test V2: Stop ──
        log ""
        log "${BOLD} Test V2: Stop ${VMTYPE} ${VMID}${NC}"
        before=$(snapshot_history)
        if [ "$VMTYPE" = "lxc" ]; then
            pct stop "$VMID" 2>&1 | tee -a "$LOG_FILE" || true
        else
            qm stop "$VMID" 2>&1 | tee -a "$LOG_FILE" || true
        fi
        ok "Stop command sent."
        wait_webhook 10
        check_new_events "$before"
    fi
}
# ============================================================================
# TEST CATEGORY: SYSTEM EVENTS (via syslog injection)
# ============================================================================
# Injects auth-failure, firewall, and service-failure messages into syslog
# via logger(1) so the JournalWatcher picks them up. Nothing is actually
# attacked or broken — these are log entries only.
test_system() {
    header "SYSTEM EVENT TESTS (syslog injection)"
    # ── Test S1: Authentication failures ──
    log ""
    log "${BOLD} Test S1: SSH auth failure injection${NC}"
    info "Injecting SSH auth failure messages into syslog."
    local before
    before=$(snapshot_history)
    logger -t sshd -p auth.warning "Failed password for root from 192.168.1.200 port 44312 ssh2"
    sleep 2
    logger -t sshd -p auth.warning "Failed password for invalid user admin from 10.0.0.50 port 55123 ssh2"
    sleep 2
    logger -t sshd -p auth.warning "Failed password for root from 192.168.1.200 port 44315 ssh2"
    wait_webhook 8
    check_new_events "$before"
    # ── Test S2: Firewall event ──
    log ""
    log "${BOLD} Test S2: Firewall drop event${NC}"
    before=$(snapshot_history)
    logger -t kernel -p kern.warning "pve-fw-reject: IN=vmbr0 OUT= MAC=00:11:22:33:44:55 SRC=10.0.0.99 DST=192.168.1.1 PROTO=TCP DPT=22 REJECT"
    sleep 2
    logger -t pvefw -p daemon.warning "firewall: blocked incoming connection from 10.0.0.99:45678 to 192.168.1.1:8006"
    wait_webhook 8
    check_new_events "$before"
    # ── Test S3: Service failure ──
    log ""
    log "${BOLD} Test S3: Service failure injection${NC}"
    before=$(snapshot_history)
    logger -t systemd -p daemon.err "pvedaemon.service: Main process exited, code=exited, status=1/FAILURE"
    sleep 1
    logger -t systemd -p daemon.err "Failed to start Proxmox VE API Daemon."
    wait_webhook 8
    check_new_events "$before"
}
# ============================================================================
# SUMMARY & REPORT
# ============================================================================
# Print an aggregate report of the notification history (counts by severity,
# source and event type) plus the latest 15 events, then point the user at
# the raw API and the run log.
show_summary() {
    header "TEST SUMMARY"
    info "Fetching full notification history..."
    echo ""
    curl -s "${API}/api/notifications/history?limit=200" 2>/dev/null | python3 -c "
import sys, json
from collections import Counter
data = json.load(sys.stdin)
history = data.get('history', [])
if not history:
    print(' No notifications in history.')
    sys.exit(0)
# Group by event_type
by_type = Counter(h['event_type'] for h in history)
# Group by severity
by_sev = Counter(h.get('severity', '?') for h in history)
# Group by source
by_src = Counter(h.get('source', '?') for h in history)
print(f' Total notifications: {len(history)}')
print()
sev_icons = {'CRITICAL': '\033[0;31mCRITICAL\033[0m', 'WARNING': '\033[1;33mWARNING\033[0m', 'INFO': '\033[0;36mINFO\033[0m'}
print(' By severity:')
for sev, count in by_sev.most_common():
    icon = sev_icons.get(sev, sev)
    print(f' {icon}: {count}')
print()
print(' By source:')
for src, count in by_src.most_common():
    print(f' {src:20s}: {count}')
print()
print(' By event type:')
for etype, count in by_type.most_common():
    print(f' {etype:30s}: {count}')
print()
print(' Latest 15 events:')
for h in history[:15]:
    sev = h.get('severity', '?')
    icon = {'CRITICAL': ' \033[0;31mRED\033[0m', 'WARNING': ' \033[1;33mYEL\033[0m', 'INFO': ' \033[0;36mBLU\033[0m'}.get(sev, ' ???')
    ts = h.get('sent_at', '?')[:19]
    src = h.get('source', '?')[:12]
    print(f' {icon} {ts} {src:12s} {h[\"event_type\"]:25s} {h.get(\"title\", \"\")[:50]}')
" 2>/dev/null | tee -a "$LOG_FILE"
    echo ""
    info "Full log saved to: ${LOG_FILE}"
    echo ""
    info "To see all history:"
    echo -e " ${CYAN}curl -s '${API}/api/notifications/history?limit=200' | python3 -m json.tool${NC}"
    echo ""
    info "To check Telegram delivery, look at your Telegram bot chat."
}
# ============================================================================
# INTERACTIVE MENU
# ============================================================================
# Render the interactive category menu and the selection prompt.
# printf '%b' interprets the same backslash escapes as echo -e, so the
# rendered output is identical to the original echo-based version.
show_menu() {
    printf '\n'
    printf '%b\n' "${BOLD} ProxMenux Real Event Test Suite${NC}"
    printf '\n'
    printf '%b\n' " ${CYAN}1)${NC} Disk error tests (SMART, ZFS, I/O, space pressure)"
    printf '%b\n' " ${CYAN}2)${NC} Backup tests (vzdump success/fail, snapshots)"
    printf '%b\n' " ${CYAN}3)${NC} VM/CT lifecycle tests (start/stop real VMs)"
    printf '%b\n' " ${CYAN}4)${NC} System event tests (auth, firewall, service failures)"
    printf '%b\n' " ${CYAN}5)${NC} Run ALL tests"
    printf '%b\n' " ${CYAN}6)${NC} Show summary report"
    printf '%b\n' " ${CYAN}q)${NC} Exit"
    printf '\n'
    printf '%b' " Select: "
}
# ── Main ────────────────────────────────────────────────────────
# Entry point. mode = disk|backup|vmct|system|all, or anything else to get
# the interactive menu. Always runs preflight() first (which also sets the
# VMID/VMNAME/VMTYPE globals used by the tests).
main() {
    local mode="${1:-menu}"
    echo ""
    echo -e "${BOLD}============================================================${NC}"
    echo -e "${BOLD} ProxMenux - Real Proxmox Event Simulator${NC}"
    echo -e "${BOLD}============================================================${NC}"
    echo -e " Tests REAL events through the full PVE -> webhook pipeline."
    echo -e " Log file: ${CYAN}${LOG_FILE}${NC}"
    echo ""
    preflight
    case "$mode" in
        disk) test_disk; show_summary ;;
        backup) test_backup; show_summary ;;
        vmct) test_vmct; show_summary ;;
        system) test_system; show_summary ;;
        all)
            test_disk
            test_backup
            test_vmct
            test_system
            show_summary
            ;;
        menu|*)
            # Interactive loop: keep showing the menu until quit or "ALL"
            while true; do
                show_menu
                read -r choice
                case "$choice" in
                    1) test_disk ;;
                    2) test_backup ;;
                    3) test_vmct ;;
                    4) test_system ;;
                    5) test_disk; test_backup; test_vmct; test_system; show_summary; break ;;
                    6) show_summary ;;
                    q|Q) echo " Bye!"; break ;;
                    *) warn "Invalid option" ;;
                esac
            done
            ;;
    esac
}

main "${1:-menu}"
+46
View File
@@ -112,6 +112,50 @@ export interface UPS {
[key: string]: any
}
/**
 * A Google Coral Edge TPU accelerator detected on the host, either a
 * PCIe/M.2 card or a USB dongle. `type` discriminates the variant;
 * optional fields are populated only for the form factor they apply to
 * (see the inline notes on each field).
 */
export interface CoralTPU {
  type: "pcie" | "usb"
  name: string
  vendor: string
  vendor_id: string
  device_id: string
  slot?: string // PCIe only, e.g. "0000:0c:00.0"
  bus_device?: string // USB only, e.g. "002:007"
  form_factor?: string // "M.2 / Mini PCIe (x1)" | "USB Accelerator" | ...
  interface_speed?: string // "PCIe 2.5GT/s x1" | "USB 3.0" | ...
  kernel_driver?: string | null
  usb_driver?: string | null
  // Presence flags for the Coral kernel modules (PCIe driver stack).
  kernel_modules?: {
    gasket: boolean
    apex: boolean
  }
  device_nodes?: string[]
  edgetpu_runtime?: string
  programmed?: boolean // USB only: runtime has interacted with the device
  drivers_ready: boolean
  // Thermal data — PCIe/M.2 only (apex driver). Always null for USB Coral.
  temperature?: number | null // °C current die temperature
  temperature_trips?: number[] | null // trip_point0/1/2_temp, ordered warn→critical
  thermal_warnings?: Array<{
    name: string // e.g. "hw_temp_warn1"
    threshold_c: number | null
    enabled: boolean
  }> | null
}
/**
 * A USB device enumerated on the host, identified lsusb-style
 * (bus:device address plus vendor/product ids) together with its class,
 * negotiated link speed and — when known — serial and bound driver.
 */
export interface UsbDevice {
  bus_device: string // "002:007"
  vendor_id: string // "18d1"
  product_id: string // "9302"
  vendor: string
  name: string
  class_code: string // "ff"
  class_label: string // "Vendor Specific", "HID", "Mass Storage", ...
  speed_mbps: number
  speed_label: string // "USB 3.0" | "USB 2.0" | ...
  serial?: string
  driver?: string
}
export interface GPU {
slot: string
name: string
@@ -208,6 +252,8 @@ export interface HardwareData {
fans?: Fan[]
power_supplies?: PowerSupply[]
ups?: UPS | UPS[]
coral_tpus?: CoralTPU[]
usb_devices?: UsbDevice[]
}
export const fetcher = async (url: string) => {
+2 -4
View File
@@ -144,13 +144,10 @@ The following dependencies are installed automatically during setup:
| `python3` + `python3-venv` | Translation support *(Translation version only)* |
| `googletrans` | Google Translate library *(Translation version only)* |
<br>
> **🛡️ Security Note / VirusTotal False Positive**
> If you scan the raw installation URL on VirusTotal, you might see a 1/95 detection by heuristic engines like *Chong Lua Dao*. This is a **known false positive**. Because this script uses the standard `curl | bash` installation pattern and downloads legitimate binaries (like `jq` from its official GitHub release), overly aggressive scanners flag the *behavior*. The script is 100% open source and safe to review. You can read more about this in [Issue #162](https://github.com/MacRimi/ProxMenux/issues/162).
---
## ⭐ Support the Project!
If you find **ProxMenux** useful, consider giving it a ⭐ on GitHub to help others discover it!
@@ -163,6 +160,7 @@ Contributions, bug reports and feature suggestions are welcome!
- 💡 [Suggest a feature](https://github.com/MacRimi/ProxMenux/discussions)
- 🔀 [Submit a pull request](https://github.com/MacRimi/ProxMenux/pulls)
If you find ProxMenux useful, consider giving it a ⭐ on GitHub — it helps others discover the project!
---
-720
View File
@@ -1,720 +0,0 @@
# base-packages.txt - Generated on 2025-05-15 21:15:29
# Proxmox Version: pve-manager/8.4.1/ (running kernel: 6.8.12-9-pve)
adduser
apparmor
apt
apt-listchanges
apt-utils
attr
base-files
base-passwd
bash
bash-completion
bc
bind9-dnsutils
bind9-host
bind9-libs
binutils
binutils-common
binutils-x86-64-linux-gnu
bridge-utils
bsdextrautils
bsd-mailx
bsdutils
btrfs-progs
busybox
bzip2
ca-certificates
ceph-common
ceph-fuse
chrony
cifs-utils
console-setup
console-setup-linux
coreutils
corosync
cpio
criu
cron
cron-daemon-common
cstream
curl
dash
dbus
dbus-bin
dbus-daemon
dbus-session-bus-common
dbus-system-bus-common
debconf
debconf-i18n
debian-archive-keyring
debian-faq
debianutils
dialog
diffutils
dirmngr
distro-info-data
dmeventd
dmidecode
dmsetup
doc-debian
dosfstools
dpkg
dtach
e2fsprogs
ebtables
efibootmgr
eject
ethtool
faketime
fdisk
fdutils
file
findutils
fontconfig
fontconfig-config
fonts-dejavu-core
fonts-font-awesome
fonts-font-logos
fonts-glyphicons-halflings
frr
frr-pythontools
fuse
gcc-12-base
gdisk
genisoimage
gettext-base
glusterfs-client
glusterfs-common
gnupg
gnupg-l10n
gnupg-utils
gnutls-bin
gpg
gpg-agent
gpgconf
gpgsm
gpgv
gpg-wks-client
gpg-wks-server
grep
groff-base
grub2-common
grub-common
grub-efi-amd64
grub-efi-amd64-bin
grub-efi-amd64-signed
grub-pc-bin
gzip
hdparm
hostname
ifupdown2
inetutils-telnet
init
initramfs-tools
initramfs-tools-core
init-system-helpers
iproute2
ipset
iptables
iputils-ping
isc-dhcp-client
isc-dhcp-common
iso-codes
jq
kbd
keyboard-configuration
keyutils
klibc-utils
kmod
krb5-locales
ksm-control-daemon
less
libacl1
libaio1
libanyevent-http-perl
libanyevent-perl
libapparmor1
libappconfig-perl
libapt-pkg6.0
libapt-pkg-perl
libarchive13
libargon2-1
libasound2
libasound2-data
libassuan0
libasyncns0
libattr1
libaudit1
libaudit-common
libauthen-pam-perl
libavahi-client3
libavahi-common3
libavahi-common-data
libbabeltrace1
libbinutils
libblas3
libblkid1
libbpf1
libbrotli1
libbsd0
libbytes-random-secure-perl
libbz2-1.0
libc6
libcairo2
libcap2
libcap2-bin
libcap-ng0
libc-ares2
libc-bin
libcbor0.8
libcephfs2
libcfg7
libc-l10n
libclone-perl
libcmap4
libcom-err2
libcommon-sense-perl
libconvert-asn1-perl
libcorosync-common4
libcpg4
libcrypt1
libcrypt-openssl-bignum-perl
libcrypt-openssl-random-perl
libcrypt-openssl-rsa-perl
libcrypt-random-seed-perl
libcryptsetup12
libcrypt-ssleay-perl
libctf0
libctf-nobfd0
libcurl3-gnutls
libcurl4
libdatrie1
libdb5.3
libdbi1
libdbus-1-3
libdebconfclient0
libdevel-cycle-perl
libdevmapper1.02.1
libdevmapper-event1.02.1
libdigest-hmac-perl
libdouble-conversion3
libdrm2
libdrm-common
libdw1
libedit2
libefiboot1
libefivar1
libelf1
libencode-locale-perl
libepoxy0
libevent-2.1-7
libevent-core-2.1-7
libexpat1
libext2fs2
libfaketime
libfdisk1
libfdt1
libffi8
libfido2-1
libfile-chdir-perl
libfile-find-rule-perl
libfile-listing-perl
libfile-readbackwards-perl
libfilesys-df-perl
libflac12
libfmt9
libfontconfig1
libfreetype6
libfribidi0
libfstrm0
libfuse2
libfuse3-3
libgbm1
libgcc-s1
libgcrypt20
libgdbm6
libgdbm-compat4
libgfapi0
libgfchangelog0
libgfrpc0
libgfxdr0
libglib2.0-0
libglusterd0
libglusterfs0
libgmp10
libgnutls30
libgnutls-dane0
libgnutlsxx30
libgoogle-perftools4
libgpg-error0
libgprofng0
libgraphite2-3
libgssapi-krb5-2
libgstreamer1.0-0
libgstreamer-plugins-base1.0-0
libharfbuzz0b
libhogweed6
libhtml-parser-perl
libhtml-tagset-perl
libhtml-tree-perl
libhttp-cookies-perl
libhttp-daemon-perl
libhttp-date-perl
libhttp-message-perl
libhttp-negotiate-perl
libibverbs1
libicu72
libidn2-0
libinih1
libio-html-perl
libio-multiplex-perl
libio-socket-ssl-perl
libio-stringy-perl
libip4tc2
libip6tc2
libipset13
libiscsi7
libisns0
libjansson4
libjemalloc2
libjpeg62-turbo
libjq1
libjs-bootstrap
libjs-extjs
libjs-jquery
libjson-c5
libjson-glib-1.0-0
libjson-glib-1.0-common
libjson-perl
libjson-xs-perl
libjs-qrcodejs
libjs-sencha-touch
libk5crypto3
libkeyutils1
libklibc
libkmod2
libknet1
libkrb5-3
libkrb5support0
libksba8
libldap-2.5-0
libldb2
liblinear4
liblinux-inotify2-perl
liblmdb0
liblocale-gettext-perl
liblockfile1
liblockfile-bin
liblttng-ust1
liblttng-ust-common1
liblttng-ust-ctl5
liblua5.3-0
liblvm2cmd2.03
liblwp-mediatypes-perl
liblwp-protocol-https-perl
liblz4-1
liblzma5
liblzo2-2
libmagic1
libmagic-mgc
libmath-random-isaac-perl
libmaxminddb0
libmd0
libmime-base32-perl
libmnl0
libmount1
libmp3lame0
libmpg123-0
libncurses6
libncursesw6
libnet1
libnetaddr-ip-perl
libnet-dbus-perl
libnet-dns-perl
libnetfilter-conntrack3
libnetfilter-log1
libnet-http-perl
libnet-ip-perl
libnet-ldap-perl
libnet-ssleay-perl
libnet-subnet-perl
libnettle8
libnewt0.52
libnfnetlink0
libnfsidmap1
libnftables1
libnftnl11
libnghttp2-14
libnl-3-200
libnl-route-3-200
libnozzle1
libnpth0
libnsl2
libnspr4
libnss3
libnss-systemd
libnuma1
libnumber-compare-perl
libnvpair3linux
liboath0
libogg0
libonig5
libopeniscsiusr
libopus0
liborc-0.4-0
libp11-kit0
libpam0g
libpam-modules
libpam-modules-bin
libpam-runtime
libpam-systemd
libpango-1.0-0
libpangocairo-1.0-0
libpangoft2-1.0-0
libpcap0.8
libpci3
libpcre2-16-0
libpcre2-8-0
libpcre3
libperl5.36
libpipeline1
libpixman-1-0
libpng16-16
libpopt0
libposix-strptime-perl
libproc2-0
libprotobuf32
libprotobuf-c1
libproxmox-acme-perl
libproxmox-acme-plugins
libproxmox-backup-qemu0
libproxmox-rs-perl
libpsl5
libpulse0
libpve-access-control
libpve-apiclient-perl
libpve-cluster-api-perl
libpve-cluster-perl
libpve-common-perl
libpve-guest-common-perl
libpve-http-server-perl
libpve-network-api-perl
libpve-network-perl
libpve-notify-perl
libpve-rs-perl
libpve-storage-perl
libpve-u2f-server-perl
libpython3.11-minimal
libpython3.11-stdlib
libpython3-stdlib
libqb100
libqrencode4
libqt5core5a
libqt5dbus5
libqt5network5
libquorum5
librabbitmq4
librados2
librados2-perl
libradosstriper1
librbd1
librdkafka1
librdmacm1
libreadline8
libregexp-ipv6-perl
librgw2
librrd8
librrds-perl
librtmp1
libsasl2-2
libsasl2-modules-db
libseccomp2
libselinux1
libsemanage2
libsemanage-common
libsepol2
libslang2
libslirp0
libsmartcols1
libsmbclient
libsnappy1v5
libsndfile1
libsocket6-perl
libspice-server1
libsqlite3-0
libss2
libssh2-1
libssl3
libstatgrab10
libstdc++6
libstring-shellquote-perl
libsubid4
libsystemd0
libsystemd-shared
libtalloc2
libtasn1-6
libtcmalloc-minimal4
libtdb1
libtemplate-perl
libterm-readline-gnu-perl
libtevent0
libtext-charwidth-perl
libtext-glob-perl
libtext-iconv-perl
libtext-wrapi18n-perl
libthai0
libthai-data
libthrift-0.17.0
libtimedate-perl
libtinfo6
libtirpc3
libtirpc-common
libtpms0
libtry-tiny-perl
libtypes-serialiser-perl
libu2f-server0
libuchardet0
libudev1
libunbound8
libunistring2
libunwind8
liburcu8
liburing2
liburi-perl
libusb-1.0-0
libusbredirparser1
libuuid1
libuuid-perl
libuutil3linux
libuv1
libva2
libva-drm2
libvirglrenderer1
libvorbis0a
libvorbisenc2
libvotequorum8
libvulkan1
libwayland-server0
libwbclient0
libwrap0
libwww-perl
libwww-robotrules-perl
libx11-6
libx11-data
libx11-xcb1
libxau6
libxcb1
libxcb-render0
libxcb-shm0
libxdmcp6
libxext6
libxml2
libxml-libxml-perl
libxml-namespacesupport-perl
libxml-parser-perl
libxml-sax-base-perl
libxml-sax-perl
libxml-twig-perl
libxrender1
libxslt1.1
libxtables12
libxxhash0
libyaml-0-2
libyaml-libyaml-perl
libyang3
libzfs4linux
libzpool5linux
libzstd1
linux-base
locales
login
logrotate
logsave
lsof
lua-lpeg
lvm2
lxcfs
lxc-pve
lzop
mailcap
man-db
manpages
mawk
media-types
memtest86+
mime-support
mokutil
mount
nano
ncurses-base
ncurses-bin
ncurses-term
netbase
netcat-traditional
nfs-common
nftables
nmap
nmap-common
novnc-pve
open-iscsi
openssh-client
openssh-server
openssh-sftp-server
openssl
passwd
pci.ids
pciutils
perl
perl-base
perl-modules-5.36
perl-openssl-defaults
pinentry-curses
postfix
procmail
procps
proxmox-archive-keyring
proxmox-backup-client
proxmox-backup-file-restore
proxmox-backup-restore-image
proxmox-default-kernel
proxmox-firewall
proxmox-grub
proxmox-kernel-6.8
proxmox-kernel-6.8.12-10-pve-signed
proxmox-kernel-6.8.12-9-pve-signed
proxmox-kernel-helper
proxmox-mail-forward
proxmox-mini-journalreader
proxmox-offline-mirror-docs
proxmox-offline-mirror-helper
proxmox-termproxy
proxmox-ve
proxmox-websocket-tunnel
proxmox-widget-toolkit
psmisc
pv
pve-cluster
pve-container
pve-docs
pve-edk2-firmware
pve-edk2-firmware-legacy
pve-edk2-firmware-ovmf
pve-esxi-import-tools
pve-firewall
pve-firmware
pve-ha-manager
pve-i18n
pve-lxc-syscalld
pve-manager
pve-qemu-kvm
pve-xtermjs
python3
python3.11
python3.11-minimal
python3.11-venv
python3-apt
python3-ceph-argparse
python3-ceph-common
python3-cephfs
python3-certifi
python3-chardet
python3-charset-normalizer
python3-debconf
python3-debian
python3-debianbts
python3-distutils
python3-httplib2
python3-idna
python3-jwt
python3-lib2to3
python3-minimal
python3-pip-whl
python3-pkg-resources
python3-prettytable
python3-protobuf
python3-pycurl
python3-pyparsing
python3-pysimplesoap
python3-pyvmomi
python3-rados
python3-rbd
python3-reportbug
python3-requests
python3-rgw
python3-setuptools
python3-setuptools-whl
python3-six
python3-systemd
python3-urllib3
python3-venv
python3-wcwidth
python3-yaml
python-apt-common
qemu-server
qrencode
readline-common
reportbug
rpcbind
rrdcached
rsync
runit-helper
samba-common
samba-libs
sed
sensible-utils
sgml-base
shared-mime-info
shim-helpers-amd64-signed
shim-signed
shim-signed-common
shim-unsigned
smartmontools
smbclient
socat
spiceterm
spl
sqlite3
ssh
ssl-cert
strace
swtpm
swtpm-libs
swtpm-tools
systemd
systemd-boot
systemd-boot-efi
systemd-sysv
sysvinit-utils
tar
tasksel
tasksel-data
tcpdump
thin-provisioning-tools
time
traceroute
tzdata
ucf
udev
uidmap
usbutils
usrmerge
util-linux
util-linux-extra
vim-common
vim-tiny
virtiofsd
vncterm
wamerican
wget
whiptail
xfsprogs
xkb-data
xsltproc
xz-utils
zfs-initramfs
zfsutils-linux
zfs-zed
zlib1g
zstd
+1
View File
@@ -0,0 +1 @@
1.1.9.5
Binary file not shown.

After

Width:  |  Height:  |  Size: 334 KiB

+10 -2
View File
@@ -821,14 +821,22 @@ install_normal_version() {
cp "./version.txt" "$LOCAL_VERSION_FILE"
cp "./install_proxmenux.sh" "$BASE_DIR/install_proxmenux.sh"
# Wipe the scripts tree before copying so any file removed upstream
# (renamed, consolidated, deprecated) disappears from the user install.
# Only $BASE_DIR/scripts/ is cleared; config.json, cache.json,
# components_status.json, version.txt, beta_version.txt, monitor.db,
# smart/, oci/ and the AppImage live outside this path and are preserved.
rm -rf "$BASE_DIR/scripts"
mkdir -p "$BASE_DIR/scripts"
cp -r "./scripts/"* "$BASE_DIR/scripts/"
chmod -R +x "$BASE_DIR/scripts/"
# Only .sh files need the executable bit. Applying +x recursively would
# also flag README.md, .json, .py etc. as executable for no reason.
find "$BASE_DIR/scripts" -type f -name '*.sh' -exec chmod +x {} +
chmod +x "$BASE_DIR/install_proxmenux.sh"
msg_ok "Necessary files created."
chmod +x "$INSTALL_DIR/$MENU_SCRIPT"
((current_step++))
show_progress $current_step $total_steps "Installing ProxMenux Monitor"
+670
View File
@@ -0,0 +1,670 @@
#!/bin/bash
# ==========================================================
# ProxMenux Monitor - Beta Program Installer
# ==========================================================
# Author : MacRimi
# Subproject : ProxMenux Monitor Beta
# Copyright : (c) 2024-2025 MacRimi
# License : GPL-3.0 (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : Beta 1.1
# Branch : develop
# Last Updated : 2026-03-26
# ==========================================================
# Description:
# This script installs the BETA version of ProxMenux Monitor
# from the develop branch on GitHub.
#
# Beta testers are expected to:
# - Report bugs and unexpected behavior via GitHub Issues
# - Provide feedback to help improve the final release
#
# Installs:
# • dialog, curl, jq, git (system dependencies)
# • ProxMenux core files (/usr/local/share/proxmenux)
# • ProxMenux Monitor AppImage (Web dashboard on port 8008)
# • Systemd service (auto-start on boot)
#
# Notes:
# - Clones from the 'develop' branch
# - Beta version file: beta_version.txt in the repository
# - Transition to stable: re-run the official installer
# ==========================================================
# ── Configuration ──────────────────────────────────────────
# Install locations and state files (shared layout with the stable installer).
INSTALL_DIR="/usr/local/bin"
BASE_DIR="/usr/local/share/proxmenux"
CONFIG_FILE="$BASE_DIR/config.json"
CACHE_FILE="$BASE_DIR/cache.json"
UTILS_FILE="$BASE_DIR/utils.sh"
LOCAL_VERSION_FILE="$BASE_DIR/version.txt"
BETA_VERSION_FILE="$BASE_DIR/beta_version.txt"
MENU_SCRIPT="menu"
MONITOR_INSTALL_DIR="$BASE_DIR"
MONITOR_SERVICE_FILE="/etc/systemd/system/proxmenux-monitor.service"
MONITOR_PORT=8008 # web dashboard port served by the AppImage
REPO_URL="https://github.com/MacRimi/ProxMenux.git"
REPO_BRANCH="develop" # beta installs always track the develop branch
TEMP_DIR="/tmp/proxmenux-beta-install-$$" # per-run clone dir ($$ = PID); removed by the EXIT trap
# ── Colors ─────────────────────────────────────────────────
# Stored in backslash notation; interpreted later by echo -e / printf '%b'.
RESET="\033[0m"
BOLD="\033[1m"
WHITE="\033[38;5;15m"
NEON_PURPLE_BLUE="\033[38;5;99m"
DARK_GRAY="\033[38;5;244m"
ORANGE="\033[38;5;208m"
GN="\033[1;92m"
YW="\033[33m"
YWB="\033[1;33m"
RD="\033[01;31m"
BL="\033[36m"
CL="\033[m"
BGN="\e[1;32m"
TAB=" "
BFR="\\r\\033[K" # carriage return + erase-to-EOL: overwrites the spinner line
HOLD="-"
BOR=" | "
CM="${GN}${CL}" # NOTE(review): this renders as nothing — a check-mark glyph may have been lost; confirm upstream
SPINNER_PID="" # PID of the running background spinner ("" = none)
# ── Spinner ────────────────────────────────────────────────
# Endless braille spinner on the current line. Hides the cursor first;
# meant to run in the background — msg_ok/msg_error/msg_warn kill it via
# the global SPINNER_PID and restore the cursor.
spinner() {
    local glyphs=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏')
    local idx=0
    printf "\e[?25l"
    while :; do
        printf "\r ${YW}%s${CL}" "${glyphs[idx]}"
        idx=$(( (idx + 1) % ${#glyphs[@]} ))
        sleep 0.1
    done
}
# Teletype effect: print $1 one character at a time with a short pause,
# then finish with a newline.
type_text() {
    local remaining="$1"
    local pause=0.04
    while [ -n "$remaining" ]; do
        echo -n "${remaining:0:1}"
        remaining="${remaining:1}"
        sleep "$pause"
    done
    echo
}
# Print a yellow "working" prefix without a newline and launch the
# spinner in the background; its PID is kept in the global SPINNER_PID
# so the next msg_ok/msg_error/msg_warn call can stop it.
msg_info() {
    printf '%b' "${TAB}${YW}${HOLD}${1}"
    spinner &
    SPINNER_PID=$!
}
# Shared teardown for the msg_* helpers: kill the background spinner
# (if still alive) and make the cursor visible again.
stop_spinner() {
    if [ -n "$SPINNER_PID" ] && ps -p $SPINNER_PID > /dev/null 2>&1; then
        kill $SPINNER_PID > /dev/null 2>&1
        SPINNER_PID=""
    fi
    printf "\e[?25h"
}
# Overwrite the in-progress line with a green success message.
msg_ok() {
    stop_spinner
    echo -e "${BFR}${TAB}${CM}${GN}${1}${CL}"
}
# Overwrite the in-progress line with a red [ERROR] message.
msg_error() {
    stop_spinner
    echo -e "${BFR}${TAB}${RD}[ERROR] ${1}${CL}"
}
# Overwrite the in-progress line with a bold yellow warning.
msg_warn() {
    stop_spinner
    echo -e "${BFR}${TAB}${YWB}${1}${CL}"
}
# Print a bold section title framed by the HOLD/BOR decorations, with a
# blank line above and below (echo -e "\n" emits two newlines).
msg_title() {
    printf '\n\n'
    printf '%b\n' "${TAB}${BOLD}${HOLD}${BOR}${1}${BOR}${HOLD}${CL}"
    printf '\n\n'
}
# Announce installer step $1 of $2 with the human-readable label $3.
show_progress() {
    local step="$1" total="$2" label="$3"
    printf '\n%b\n\n' "${BOLD}${BL}${TAB}Installing ProxMenux Beta: Step ${step} of ${total}${CL}"
    printf '%b\n' "${TAB}${BOLD}${YW}${HOLD}${label}${CL}"
}
# ── Cleanup ────────────────────────────────────────────────
# EXIT-trap handler: best-effort removal of the temporary clone dir.
# Returns 0 whether or not the directory existed, like the original.
cleanup() {
    [ ! -d "$TEMP_DIR" ] || rm -rf "$TEMP_DIR"
}
trap cleanup EXIT
# ── Logo ───────────────────────────────────────────────────
# Clear the screen and print the ProxMenux logo plus the beta banner.
# Local consoles get a truecolor half-block logo; SSH sessions (detected
# via $SSH_TTY or an IP in `who am i`) get a simpler 256-color fallback,
# since the truecolor art renders poorly over many terminals.
show_proxmenux_logo() {
clear
if [[ -z "$SSH_TTY" && -z "$(who am i | awk '{print $NF}' | grep -E '([0-9]{1,3}\.){3}[0-9]{1,3}')" ]]; then
# Quoted "EOF" keeps the escape sequences literal; they are interpreted
# later by echo -e when each line is printed.
LOGO=$(cat << "EOF"
\e[0m\e[38;2;61;61;61m▆\e[38;2;60;60;60m▄\e[38;2;54;54;54m▂\e[0m \e[38;2;0;0;0m \e[0m \e[38;2;54;54;54m▂\e[38;2;60;60;60m▄\e[38;2;61;61;61m▆\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[38;2;61;61;61;48;2;37;37;37m▇\e[0m\e[38;2;60;60;60m▅\e[38;2;56;56;56m▃\e[38;2;37;37;37m▁ \e[38;2;36;36;36m▁\e[38;2;56;56;56m▃\e[38;2;60;60;60m▅\e[38;2;61;61;61;48;2;37;37;37m▇\e[48;2;62;62;62m \e[0m\e[7m\e[38;2;60;60;60m▁\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[7m\e[38;2;61;61;61m▂\e[0m\e[38;2;62;62;62;48;2;61;61;61m┈\e[48;2;62;62;62m \e[48;2;61;61;61m┈\e[0m\e[38;2;60;60;60m▆\e[38;2;57;57;57m▄\e[38;2;48;48;48m▂\e[0m \e[38;2;47;47;47m▂\e[38;2;57;57;57m▄\e[38;2;60;60;60m▆\e[38;2;62;62;62;48;2;61;61;61m┈\e[48;2;62;62;62m \e[48;2;61;61;61m┈\e[0m\e[7m\e[38;2;60;60;60m▂\e[38;2;57;57;57m▄\e[38;2;47;47;47m▆\e[0m \e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[38;2;32;32;32m▏\e[7m\e[38;2;39;39;39m▇\e[38;2;57;57;57m▅\e[38;2;60;60;60m▃\e[0m\e[38;2;40;40;40;48;2;61;61;61m▁\e[48;2;62;62;62m \e[38;2;54;54;54;48;2;61;61;61m┊\e[48;2;62;62;62m \e[38;2;39;39;39;48;2;61;61;61m▁\e[0m\e[7m\e[38;2;60;60;60m▃\e[38;2;57;57;57m▅\e[38;2;38;38;38m▇\e[0m \e[38;2;193;60;2m▃\e[38;2;217;67;2m▅\e[38;2;225;70;2m▇\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[38;2;32;32;32m▏\e[0m \e[38;2;203;63;2m▄\e[38;2;147;45;1m▂\e[0m \e[7m\e[38;2;55;55;55m▆\e[38;2;60;60;60m▄\e[38;2;61;61;61m▂\e[38;2;60;60;60m▄\e[38;2;55;55;55m▆\e[0m \e[38;2;144;44;1m▂\e[38;2;202;62;2m▄\e[38;2;219;68;2m▆\e[38;2;231;72;3;48;2;226;70;2m┈\e[48;2;231;72;3m \e[48;2;225;70;2m▉\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[38;2;32;32;32m▏\e[7m\e[38;2;121;37;1m▉\e[0m\e[38;2;0;0;0;48;2;231;72;3m \e[0m\e[38;2;221;68;2m▇\e[38;2;208;64;2m▅\e[38;2;212;66;2m▂\e[38;2;123;37;0m▁\e[38;2;211;65;2m▂\e[38;2;207;64;2m▅\e[38;2;220;68;2m▇\e[48;2;231;72;3m \e[38;2;231;72;3;48;2;225;70;2m┈\e[0m\e[7m\e[38;2;221;68;2m▂\e[0m\e[38;2;44;13;0;48;2;231;72;3m \e[38;2;231;72;3;48;2;225;70;2m▉\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[38;2;32;32;32m▏\e[0m \e[7m\e[38;2;190;59;2m▅\e[38;2;216;67;2m▃\e[38;2;225;70;2m▁\e[0m\e[38;2;95;29;0;48;2;231;72;3m \e[38;2;231;72;3;48;2;230;71;2m┈\e[48;2;231;72;3m \e[0m\e[7m\e[38;2;225;70;2m▁\e[38;2;216;67;2m▃\e[38;2;191;59;2m▅\e[0m \e[38;2;0;0;0;48;2;231;72;3m \e[38;2;231;72;3;48;2;225;70;2m▉\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[38;2;32;32;32m▏ \e[0m \e[7m\e[38;2;172;53;1m▆\e[38;2;213;66;2m▄\e[38;2;219;68;2m▂\e[38;2;213;66;2m▄\e[38;2;174;54;2m▆\e[0m \e[38;2;0;0;0m \e[0m \e[38;2;0;0;0;48;2;231;72;3m \e[38;2;231;72;3;48;2;225;70;2m▉\e[0m
\e[38;2;59;59;59;48;2;62;62;62m▏ \e[0m\e[38;2;32;32;32m▏ \e[0m \e[38;2;0;0;0;48;2;231;72;3m \e[38;2;231;72;3;48;2;225;70;2m▉\e[0m
\e[7m\e[38;2;52;52;52m▆\e[38;2;59;59;59m▄\e[38;2;61;61;61m▂\e[0m\e[38;2;31;31;31m▏ \e[0m \e[7m\e[38;2;228;71;2m▂\e[38;2;221;69;2m▄\e[38;2;196;60;2m▆\e[0m
EOF
)
# Ten text lines shown to the right of the ten logo lines.
TEXT=(
""
""
"${BOLD}ProxMenux${RESET}"
""
"${BOLD}${NEON_PURPLE_BLUE}An Interactive Menu for${RESET}"
"${BOLD}${NEON_PURPLE_BLUE}Proxmox VE management${RESET}"
""
"${BOLD}${YW} ★ BETA PROGRAM ★${RESET}"
""
""
)
mapfile -t logo_lines <<< "$LOGO"
for i in {0..9}; do
echo -e "${TAB}${logo_lines[i]} ${WHITE}${RESET} ${TEXT[i]}"
done
echo -e
else
# SSH fallback: 13 rows of 256-color shaded blocks with matching text.
TEXT=(
"" "" "" ""
"${BOLD}ProxMenux${RESET}"
""
"${BOLD}${NEON_PURPLE_BLUE}An Interactive Menu for${RESET}"
"${BOLD}${NEON_PURPLE_BLUE}Proxmox VE management${RESET}"
""
"${BOLD}${YW} ★ BETA PROGRAM ★${RESET}"
"" "" ""
)
LOGO=(
"${DARK_GRAY}░░░░ ░░░░${RESET}"
"${DARK_GRAY}░░░░░░░ ░░░░░░ ${RESET}"
"${DARK_GRAY}░░░░░░░░░░░ ░░░░░░░ ${RESET}"
"${DARK_GRAY}░░░░ ░░░░░░ ░░░░░░ ${ORANGE}░░${RESET}"
"${DARK_GRAY}░░░░ ░░░░░░░ ${ORANGE}░░▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ░░░ ${ORANGE}░▒▒▒▒▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ${ORANGE}▒▒▒░ ░▒▒▒▒▒▒▒▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ${ORANGE}░▒▒▒▒▒ ▒▒▒▒▒░░ ▒▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ${ORANGE}░░▒▒▒▒▒▒▒░░ ▒▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ${ORANGE}░░░ ▒▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ${ORANGE}▒▒▒▒${RESET}"
"${DARK_GRAY}░░░░ ${ORANGE}▒▒▒░${RESET}"
"${DARK_GRAY} ░░ ${ORANGE}░░ ${RESET}"
)
for i in {0..12}; do
echo -e "${TAB}${LOGO[i]}${RESET} ${TEXT[i]}"
done
echo -e
fi
}
# ── Beta welcome message ───────────────────────────────────
# Print the beta-program welcome/consent screen and block until the user
# presses Enter (Ctrl+C aborts the whole installer).
show_beta_welcome() {
local width=62
local line
# Horizontal rule of box-drawing dashes, $width characters wide.
line=$(printf '─%.0s' $(seq 1 $width))
echo -e "${TAB}${BOLD}${YW}${line}${CL}"
echo -e "${TAB}${BOLD}${YW}${CL}${BOLD} Welcome to the ProxMenux Monitor Beta Program ${YW}${CL}"
echo -e "${TAB}${BOLD}${YW}${line}${CL}"
echo
echo -e "${TAB}${WHITE}You are about to install a ${BOLD}pre-release (beta)${RESET}${WHITE} version of${CL}"
echo -e "${TAB}${WHITE}ProxMenux Monitor, built from the ${BOLD}develop${RESET}${WHITE} branch.${CL}"
echo
# NOTE(review): the ${GN}${CL} / ${YW}${CL} pairs below render as
# nothing — bullet glyphs may have been lost in transit; confirm upstream.
echo -e "${TAB}${BOLD}${GN}What this means for you:${CL}"
echo -e "${TAB} ${GN}${CL} You'll get the latest features before the official release."
echo -e "${TAB} ${GN}${CL} Some things may not work perfectly — that's expected."
echo -e "${TAB} ${GN}${CL} Your feedback is what makes the final version better."
echo
echo -e "${TAB}${BOLD}${YW}How to report issues:${CL}"
echo -e "${TAB} ${YW}${CL} Open a GitHub Issue at:"
echo -e "${TAB} ${BL}https://github.com/MacRimi/ProxMenux/issues${CL}"
echo -e "${TAB} ${YW}${CL} Describe what happened, what you expected, and any"
echo -e "${TAB} error messages you saw. Logs help a lot:"
echo -e "${TAB} ${DARK_GRAY}journalctl -u proxmenux-monitor -n 50${CL}"
echo
echo -e "${TAB}${BOLD}${NEON_PURPLE_BLUE}Thank you for being part of the beta program!${CL}"
echo -e "${TAB}${DARK_GRAY}Your help is essential to deliver a stable and polished release.${CL}"
echo
echo -e "${TAB}${BOLD}${YW}${line}${CL}"
echo -e "${TAB}${BOLD}${YW}${CL} ${YW}${CL}"
echo -e "${TAB}${BOLD}${YW}${CL} Press ${BOLD}${GN}[Enter]${CL} to continue with the beta installation, ${YW}${CL}"
echo -e "${TAB}${BOLD}${YW}${CL} or ${BOLD}${RD}[Ctrl+C]${CL} to cancel and exit. ${YW}${CL}"
echo -e "${TAB}${BOLD}${YW}${CL} ${YW}${CL}"
echo -e "${TAB}${BOLD}${YW}${line}${CL}"
echo
# Wait for explicit confirmation before touching the system.
read -r -p ""
echo
}
# ── Helpers ────────────────────────────────────────────────
# Echo the host's primary IPv4: the source address of the default route,
# falling back to the first address from `hostname -I`, and finally the
# literal string "localhost" so callers always get something printable.
get_server_ip() {
    local addr
    addr=$(ip route get 1.1.1.1 2>/dev/null | grep -oP 'src \K\S+')
    if [ -z "$addr" ]; then
        addr=$(hostname -I | awk '{print $1}')
    fi
    echo "${addr:-localhost}"
}
# Record a component's status in config.json as
#   .<component> = { status: <status>, timestamp: <UTC ISO-8601> }
# The file is (re)seeded with '{}' when missing or not valid JSON, and
# reset to '{}' if the jq write itself fails, so it is never left corrupt.
# Always returns 0.
update_config() {
    local component="$1"
    local status="$2"
    local timestamp
    timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
    mkdir -p "$(dirname "$CONFIG_FILE")"
    # Re-seed when the file is absent or unparseable.
    if [ ! -f "$CONFIG_FILE" ] || ! jq empty "$CONFIG_FILE" >/dev/null 2>&1; then
        echo '{}' > "$CONFIG_FILE"
    fi
    local tmp_file
    tmp_file=$(mktemp)
    if jq --arg comp "$component" --arg stat "$status" --arg time "$timestamp" \
        '.[$comp] = {status: $stat, timestamp: $time}' "$CONFIG_FILE" > "$tmp_file" 2>/dev/null; then
        mv "$tmp_file" "$CONFIG_FILE"
    else
        # jq failed mid-write: fall back to an empty object rather than
        # leaving a half-written config behind.
        echo '{}' > "$CONFIG_FILE"
    fi
    rm -f "$tmp_file"
    # Bug fix: the old trailing '[ -f "$tmp_file" ] && rm -f ...' made the
    # function return 1 on its own success path (tmp already moved away).
    return 0
}
# Clear the beta update_available flags in config.json after a successful
# update. A missing config file is not an error; a failed jq rewrite
# leaves the file untouched. Always returns 0.
reset_update_flag() {
    [ ! -f "$CONFIG_FILE" ] && return 0
    local tmp_file
    tmp_file=$(mktemp)
    if jq '.update_available.beta = false | .update_available.beta_version = ""' "$CONFIG_FILE" > "$tmp_file" 2>/dev/null; then
        mv "$tmp_file" "$CONFIG_FILE"
    fi
    rm -f "$tmp_file"
    # Bug fix: the old trailing '[ -f "$tmp_file" ] && rm -f ...' made the
    # function return 1 on its own success path (tmp already moved away).
    return 0
}
# Delete state files that no longer parse as JSON so the installer can
# recreate them from scratch instead of tripping over corrupt data.
cleanup_corrupted_files() {
    local state_file
    for state_file in "$CONFIG_FILE" "$CACHE_FILE"; do
        if [ -f "$state_file" ] && ! jq empty "$state_file" >/dev/null 2>&1; then
            rm -f "$state_file"
        fi
    done
}
# Echo the path of the highest-versioned ProxMenux-*.AppImage inside the
# cloned repo's AppImage/ directory (version-aware sort). Returns 1 when
# the directory is missing or contains no matching file.
detect_latest_appimage() {
    local appimage_dir="$TEMP_DIR/AppImage" newest
    [ -d "$appimage_dir" ] || return 1
    newest=$(find "$appimage_dir" -name "ProxMenux-*.AppImage" -type f | sort -V | tail -1)
    [ -n "$newest" ] || return 1
    echo "$newest"
}
# Extract the "X.Y.Z" version from a ProxMenux-X.Y.Z.AppImage path.
# Prints nothing (and returns grep's non-zero status) on no match.
get_appimage_version() {
    basename "$1" | grep -oP 'ProxMenux-\K[0-9]+\.[0-9]+\.[0-9]+'
}
# ── Monitor install ────────────────────────────────────────
# Install or upgrade the ProxMenux Monitor AppImage from the clone in
# $TEMP_DIR, verifying its SHA256 when a checksum file is shipped, and
# installing the shutdown-notify helper used by the unit's ExecStop.
#
# Returns:
#   0  fresh install (no systemd unit yet; caller must create it)
#   1  failure (AppImage missing, checksum mismatch, or restart failed)
#   2  upgrade of an existing install; service restarted successfully
install_proxmenux_monitor() {
    local appimage_source
    appimage_source=$(detect_latest_appimage)
    if [ -z "$appimage_source" ] || [ ! -f "$appimage_source" ]; then
        msg_error "ProxMenux Monitor AppImage not found in $TEMP_DIR/AppImage/"
        msg_warn "Make sure the AppImage directory exists in the develop branch."
        update_config "proxmenux_monitor" "appimage_not_found"
        return 1
    fi
    local appimage_version
    appimage_version=$(get_appimage_version "$appimage_source")
    # Stop a running service before replacing its binary.
    systemctl is-active --quiet proxmenux-monitor.service 2>/dev/null && \
        systemctl stop proxmenux-monitor.service
    local service_exists=false
    [ -f "$MONITOR_SERVICE_FILE" ] && service_exists=true
    local sha256_file="$TEMP_DIR/AppImage/ProxMenux-Monitor.AppImage.sha256"
    if [ -f "$sha256_file" ]; then
        msg_info "Verifying AppImage integrity..."
        local expected_hash actual_hash
        expected_hash=$(grep -Eo '^[a-f0-9]+' "$sha256_file" | tr -d '\n')
        actual_hash=$(sha256sum "$appimage_source" | awk '{print $1}')
        if [ "$expected_hash" != "$actual_hash" ]; then
            msg_error "SHA256 verification failed! The AppImage may be corrupted."
            return 1
        fi
        msg_ok "SHA256 verification passed."
    else
        msg_warn "SHA256 checksum file not found. Skipping verification."
    fi
    msg_info "Installing ProxMenux Monitor (beta)..."
    mkdir -p "$MONITOR_INSTALL_DIR"
    local target_path="$MONITOR_INSTALL_DIR/ProxMenux-Monitor.AppImage"
    cp "$appimage_source" "$target_path"
    chmod +x "$target_path"
    # Copy shutdown-notify.sh script for systemd ExecStop
    local shutdown_script_src="$TEMP_DIR/scripts/shutdown-notify.sh"
    local shutdown_script_dst="$MONITOR_INSTALL_DIR/scripts/shutdown-notify.sh"
    if [ -f "$shutdown_script_src" ]; then
        # Fix: the scripts/ directory may not exist yet if the monitor is
        # installed before the repo scripts tree is copied; without this
        # mkdir the cp below would fail and the ExecStop hook would be
        # silently missing.
        mkdir -p "$(dirname "$shutdown_script_dst")"
        cp "$shutdown_script_src" "$shutdown_script_dst"
        chmod +x "$shutdown_script_dst"
        msg_ok "Shutdown notification script installed."
    else
        msg_warn "Shutdown script not found at $shutdown_script_src"
    fi
    msg_ok "ProxMenux Monitor beta v${appimage_version} installed."
    if [ "$service_exists" = false ]; then
        return 0
    else
        # Check if service needs to be updated (missing ExecStop or outdated config)
        if ! grep -q "ExecStop=" "$MONITOR_SERVICE_FILE" 2>/dev/null; then
            msg_info "Updating service configuration (adding shutdown notification)..."
            update_monitor_service
        fi
        systemctl start proxmenux-monitor.service
        sleep 2
        if systemctl is-active --quiet proxmenux-monitor.service; then
            update_config "proxmenux_monitor" "beta_updated"
            return 2
        else
            msg_warn "Service failed to restart. Check: journalctl -u proxmenux-monitor"
            update_config "proxmenux_monitor" "failed"
            return 1
        fi
    fi
}
# Rewrite the existing systemd unit with the current template — used to
# retrofit the ExecStop shutdown notification into older installs — then
# reload systemd so it picks up the change.
# NOTE: everything between "<< EOF" and "EOF" is written verbatim to
# $MONITOR_SERVICE_FILE; the unquoted delimiter lets $MONITOR_INSTALL_DIR,
# $exec_path and $MONITOR_PORT expand. Do not add comments inside it.
update_monitor_service() {
local exec_path="$MONITOR_INSTALL_DIR/ProxMenux-Monitor.AppImage"
cat > "$MONITOR_SERVICE_FILE" << EOF
[Unit]
Description=ProxMenux Monitor - Web Dashboard (Beta)
After=network.target
Before=shutdown.target reboot.target halt.target
Conflicts=shutdown.target reboot.target halt.target
[Service]
Type=simple
User=root
WorkingDirectory=$MONITOR_INSTALL_DIR
ExecStart=$exec_path
ExecStop=/bin/bash $MONITOR_INSTALL_DIR/scripts/shutdown-notify.sh
Restart=on-failure
RestartSec=10
Environment="PORT=$MONITOR_PORT"
TimeoutStopSec=45
KillMode=mixed
KillSignal=SIGTERM
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
msg_ok "Service configuration updated."
}
# Create, enable and start the proxmenux-monitor systemd unit. Prefers
# the unit template shipped in the cloned repo (patching its ExecStart to
# the installed AppImage); falls back to a built-in heredoc template.
# Records the outcome in config.json; returns 0 on a running service,
# 1 otherwise.
create_monitor_service() {
msg_info "Creating ProxMenux Monitor service..."
local exec_path="$MONITOR_INSTALL_DIR/ProxMenux-Monitor.AppImage"
if [ -f "$TEMP_DIR/systemd/proxmenux-monitor.service" ]; then
# Use the repo's template, rewriting ExecStart to the local AppImage.
sed "s|ExecStart=.*|ExecStart=$exec_path|g" \
"$TEMP_DIR/systemd/proxmenux-monitor.service" > "$MONITOR_SERVICE_FILE"
msg_ok "Service file loaded from repository."
else
# Fallback template. Everything between "<< EOF" and "EOF" is written
# verbatim to the unit file; do not add comments inside it.
cat > "$MONITOR_SERVICE_FILE" << EOF
[Unit]
Description=ProxMenux Monitor - Web Dashboard (Beta)
After=network.target
Before=shutdown.target reboot.target halt.target
Conflicts=shutdown.target reboot.target halt.target
[Service]
Type=simple
User=root
WorkingDirectory=$MONITOR_INSTALL_DIR
ExecStart=$exec_path
ExecStop=/bin/bash $MONITOR_INSTALL_DIR/scripts/shutdown-notify.sh
Restart=on-failure
RestartSec=10
Environment="PORT=$MONITOR_PORT"
TimeoutStopSec=45
KillMode=mixed
KillSignal=SIGTERM
[Install]
WantedBy=multi-user.target
EOF
msg_ok "Default service file created."
fi
systemctl daemon-reload
systemctl enable proxmenux-monitor.service > /dev/null 2>&1
systemctl start proxmenux-monitor.service > /dev/null 2>&1
# Give the AppImage a moment to bind its port before checking health.
sleep 3
if systemctl is-active --quiet proxmenux-monitor.service; then
msg_ok "ProxMenux Monitor service started successfully."
update_config "proxmenux_monitor" "beta_installed"
return 0
else
msg_warn "ProxMenux Monitor service failed to start."
echo -e "${TAB}${DARK_GRAY}Check logs : journalctl -u proxmenux-monitor -n 20${CL}"
echo -e "${TAB}${DARK_GRAY}Check status: systemctl status proxmenux-monitor${CL}"
update_config "proxmenux_monitor" "failed"
return 1
fi
}
# ── Main install ───────────────────────────────────────────
install_beta() {
    # Orchestrate the 4-step beta installation:
    #   1) system dependencies  2) clone the develop branch
    #   3) install files under $BASE_DIR  4) ProxMenux Monitor.
    # Returns 1 (or exits) on any unrecoverable failure.
    local total_steps=4
    local current_step=1

    # ── Step 1: Dependencies ──────────────────────────────
    show_progress $current_step $total_steps "Installing system dependencies"
    # jq is required by update_config; try APT first, fall back to the
    # static binary published on GitHub.
    if ! command -v jq > /dev/null 2>&1; then
        apt-get update > /dev/null 2>&1
        if apt-get install -y jq > /dev/null 2>&1 && command -v jq > /dev/null 2>&1; then
            update_config "jq" "installed"
        else
            local jq_url="https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux-amd64"
            if wget -q -O /usr/local/bin/jq "$jq_url" 2>/dev/null && chmod +x /usr/local/bin/jq \
               && command -v jq > /dev/null 2>&1; then
                update_config "jq" "installed_from_github"
            else
                msg_error "Failed to install jq. Please install it manually and re-run."
                update_config "jq" "failed"
                return 1
            fi
        fi
    else
        update_config "jq" "already_installed"
    fi
    local BASIC_DEPS=("dialog" "curl" "git")
    # Only refresh the package index once per run.
    if [ -z "${APT_UPDATED:-}" ]; then
        apt-get update -y > /dev/null 2>&1 || true
        APT_UPDATED=1
    fi
    for pkg in "${BASIC_DEPS[@]}"; do
        # FIX: 'dpkg -l | grep -qw $pkg' matches the package name anywhere
        # in the listing (including descriptions of other packages); query
        # the package's install status instead.
        if ! dpkg -s "$pkg" 2>/dev/null | grep -q "^Status:.*installed"; then
            if apt-get install -y "$pkg" > /dev/null 2>&1; then
                update_config "$pkg" "installed"
            else
                msg_error "Failed to install $pkg. Please install it manually."
                update_config "$pkg" "failed"
                return 1
            fi
        else
            update_config "$pkg" "already_installed"
        fi
    done
    msg_ok "Dependencies installed: jq, dialog, curl, git."

    # ── Step 2: Clone develop branch ─────────────────────
    ((current_step++))
    show_progress $current_step $total_steps "Cloning ProxMenux develop branch"
    msg_info "Cloning branch '${REPO_BRANCH}' from repository..."
    if ! git clone --depth 1 --branch "$REPO_BRANCH" "$REPO_URL" "$TEMP_DIR" 2>/dev/null; then
        msg_error "Failed to clone branch '$REPO_BRANCH' from $REPO_URL"
        exit 1
    fi
    msg_ok "Repository cloned successfully (branch: ${REPO_BRANCH})."
    # Read beta version if available
    local beta_version="unknown"
    if [ -f "$TEMP_DIR/beta_version.txt" ]; then
        beta_version=$(cat "$TEMP_DIR/beta_version.txt" | tr -d '[:space:]')
    fi
    # FIX: abort if the temp dir cannot be entered; otherwise every
    # relative 'cp ./...' below would run from the wrong directory.
    cd "$TEMP_DIR" || { msg_error "Failed to enter $TEMP_DIR"; exit 1; }

    # ── Step 3: Files ─────────────────────────────────────
    ((current_step++))
    show_progress $current_step $total_steps "Creating directories and copying files"
    mkdir -p "$BASE_DIR" "$INSTALL_DIR"
    [ ! -f "$CONFIG_FILE" ] && echo '{}' > "$CONFIG_FILE"
    # Preserve user/runtime directories that must never be overwritten
    mkdir -p "$BASE_DIR/oci"
    cp "./scripts/utils.sh" "$UTILS_FILE"
    cp "./menu" "$INSTALL_DIR/$MENU_SCRIPT"
    cp "./version.txt" "$LOCAL_VERSION_FILE" 2>/dev/null || true
    # Store beta version marker
    if [ -f "$TEMP_DIR/beta_version.txt" ]; then
        cp "$TEMP_DIR/beta_version.txt" "$BETA_VERSION_FILE"
    else
        echo "$beta_version" > "$BETA_VERSION_FILE"
    fi
    cp "./install_proxmenux.sh" "$BASE_DIR/install_proxmenux.sh" 2>/dev/null || true
    cp "./install_proxmenux_beta.sh" "$BASE_DIR/install_proxmenux_beta.sh" 2>/dev/null || true
    # Wipe the scripts tree before copying so any file removed upstream
    # (renamed, consolidated, deprecated) disappears from the user install.
    # Only $BASE_DIR/scripts/ is cleared; config.json, cache.json,
    # components_status.json, version.txt, beta_version.txt, monitor.db,
    # smart/, oci/ and the AppImage live outside this path and are preserved.
    rm -rf "$BASE_DIR/scripts"
    mkdir -p "$BASE_DIR/scripts"
    cp -r "./scripts/"* "$BASE_DIR/scripts/"
    # Only .sh files need the executable bit. Applying +x recursively would
    # also flag README.md, .json, .py etc. as executable for no reason.
    find "$BASE_DIR/scripts" -type f -name '*.sh' -exec chmod +x {} +
    if [ -d "./oci" ]; then
        mkdir -p "$BASE_DIR/oci"
        cp -r "./oci/"* "$BASE_DIR/oci/" 2>/dev/null || true
    fi
    chmod +x "$INSTALL_DIR/$MENU_SCRIPT"
    [ -f "$BASE_DIR/install_proxmenux.sh" ] && chmod +x "$BASE_DIR/install_proxmenux.sh"
    [ -f "$BASE_DIR/install_proxmenux_beta.sh" ] && chmod +x "$BASE_DIR/install_proxmenux_beta.sh"
    # Store beta flag in config
    update_config "beta_program" "active"
    update_config "beta_version" "$beta_version"
    update_config "install_branch" "$REPO_BRANCH"
    msg_ok "Files installed. Beta version: ${beta_version}."

    # ── Step 4: Monitor ───────────────────────────────────
    ((current_step++))
    show_progress $current_step $total_steps "Installing ProxMenux Monitor (beta)"
    install_proxmenux_monitor
    local monitor_status=$?
    # 0 = fresh install (needs the systemd unit), 2 = existing beta updated.
    if [ $monitor_status -eq 0 ]; then
        create_monitor_service
    elif [ $monitor_status -eq 2 ]; then
        msg_ok "ProxMenux Monitor beta updated successfully."
    fi
    # Reset the update indicator flag after successful installation
    reset_update_flag
    msg_ok "Beta installation completed."
}
# ── Stable transition notice ───────────────────────────────
check_stable_available() {
    # Banner inviting the user to leave the beta program once a stable
    # release exists (reserved for future use: the updater can call this
    # when main's version.txt is newer than beta_version.txt).
    local rule="${TAB}${BOLD}${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
    printf '%b\n' \
        "" \
        "$rule" \
        "${TAB}${BOLD}${GN} A stable release is now available!${CL}" \
        "${TAB}${WHITE} To leave the beta program and switch to the stable version,${CL}" \
        "${TAB}${WHITE} run the official installer:${CL}" \
        "" \
        "${TAB} ${YWB}bash -c \"\$(wget -qLO - https://raw.githubusercontent.com/MacRimi/ProxMenux/main/install_proxmenux.sh)\"${CL}" \
        "" \
        "${TAB}${DARK_GRAY} This will cleanly replace your beta install with the stable release.${CL}" \
        "$rule" \
        ""
}
# ── Entry point ────────────────────────────────────────────
# Root is required: the installer writes under /usr/local and manages services.
if [ "$(id -u)" -ne 0 ]; then
    echo -e "${RD}[ERROR] This script must be run as root.${CL}"
    exit 1
fi
# Pre-flight cleanup of any broken previous run, then branding/welcome.
cleanup_corrupted_files
show_proxmenux_logo
show_beta_welcome
msg_title "Installing ProxMenux Beta — branch: develop"
install_beta
# Load utils if available (install_beta copies the fresh utils.sh into
# $UTILS_FILE, so this picks up the just-installed helpers)
[ -f "$UTILS_FILE" ] && source "$UTILS_FILE"
msg_title "ProxMenux Beta installed successfully"
# If the monitor service came up, show its dashboard URL.
if systemctl is-active --quiet proxmenux-monitor.service; then
    local_ip=$(get_server_ip)
    echo -e "${GN}🌐 ProxMenux Monitor (beta) is running${CL}: ${BL}http://${local_ip}:${MONITOR_PORT}${CL}"
    echo
fi
# Final hint on how to launch ProxMenux.
echo -ne "${GN}"
type_text "To run ProxMenux, execute this command in your terminal:"
echo -e "${YWB} menu${CL}"
echo
echo -e "${TAB}${DARK_GRAY}Report issues at: https://github.com/MacRimi/ProxMenux/issues${CL}"
echo
exit 0
+103 -44
View File
@@ -4,98 +4,157 @@
# ProxMenux - A menu-driven script for Proxmox VE management
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : (GPL-3.0) (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : 1.1
# Last Updated: 04/07/2025
# Copyright : (c) 2024-2025 MacRimi
# License : GPL-3.0 (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : 1.2
# Last Updated: 18/03/2026
# ==========================================================
# Description:
# This script serves as the main entry point for ProxMenux,
# a menu-driven tool designed for Proxmox VE management.
# - Displays the ProxMenux logo on startup.
# - Loads necessary configurations and language settings.
# - Checks for available updates and installs them if confirmed.
# - Downloads and executes the latest main menu script.
#
# Key Features:
# - Ensures ProxMenux is always up-to-date by fetching the latest version.
# - Uses whiptail for interactive menus and language selection.
# - Loads utility functions and translation support.
# - Maintains a cache system to improve performance.
# - Executes the ProxMenux main menu dynamically from the repository.
#
# This script ensures a streamlined and automated experience
# for managing Proxmox VE using ProxMenux.
# Main entry point for ProxMenux.
# - Loads configuration and utility functions.
# - Detects if running in Beta Program mode (develop branch).
# - Checks for updates from the appropriate branch (main or develop).
# - In beta mode: compares beta_version.txt; notifies when a stable
# release is available and prompts the user to switch.
# - Launches the main menu.
# ==========================================================
# Configuration ============================================
# NOTE(review): this region looks like a merged old/new diff. REPO_URL
# appears superseded by REPO_MAIN/REPO_DEVELOP below — confirm which set
# the rest of the file actually uses.
REPO_URL="https://raw.githubusercontent.com/MacRimi/ProxMenux/main"
# ── Configuration ──────────────────────────────────────────
BASE_DIR="/usr/local/share/proxmenux"            # install root
LOCAL_SCRIPTS="$BASE_DIR/scripts"                # menu/tool scripts
CONFIG_FILE="$BASE_DIR/config.json"              # per-install settings (read by is_beta)
CACHE_FILE="$BASE_DIR/cache.json"
UTILS_FILE="$BASE_DIR/utils.sh"                  # shared helper functions
LOCAL_VERSION_FILE="$BASE_DIR/version.txt"       # installed stable version marker
BETA_VERSION_FILE="$BASE_DIR/beta_version.txt"   # installed beta build marker
VENV_PATH="/opt/googletrans-env"
# NOTE(review): the repo URLs are only assigned when utils.sh exists —
# presumably they should be unconditional; verify against the develop branch.
if [[ -f "$UTILS_FILE" ]]; then
REPO_MAIN="https://raw.githubusercontent.com/MacRimi/ProxMenux/main"
REPO_DEVELOP="https://raw.githubusercontent.com/MacRimi/ProxMenux/develop"
source "$UTILS_FILE"
fi
# ── Load utilities ─────────────────────────────────────────
# NOTE(review): utils.sh is sourced a second time here — redundant with the
# block above but harmless; likely diff residue.
[[ -f "$UTILS_FILE" ]] && source "$UTILS_FILE"
: "${LOCAL_SCRIPTS:=/usr/local/share/proxmenux/scripts}"  # fallback default
# =========================================================
# ── Detect beta mode ───────────────────────────────────────
# Returns 0 (true) if this install is part of the beta program.
is_beta() {
    # True (exit 0) when this installation is enrolled in the beta program,
    # i.e. config.json carries beta_program.status == "active".
    if [[ ! -f "$CONFIG_FILE" ]]; then
        return 1
    fi
    local status
    status="$(jq -r '.beta_program.status // empty' "$CONFIG_FILE" 2>/dev/null)"
    if [[ "$status" == "active" ]]; then
        return 0
    fi
    return 1
}
# ── Check for updates ──────────────────────────────────────
check_updates() {
    # Dispatch to the update check matching this install's branch:
    # develop (beta program) or main (stable).
    # FIX: dropped the leftover locals/assignments from the pre-refactor
    # implementation (VERSION_URL/INSTALL_URL/INSTALL_SCRIPT and the
    # REMOTE/LOCAL version locals referenced the removed $REPO_URL and
    # were never read once this dispatch was introduced).
    if is_beta; then
        check_updates_beta
    else
        check_updates_stable
    fi
}
# ── Stable update check (main branch) ─────────────────────
check_updates_stable() {
    # Compare the installed version.txt against main's version.txt and, on
    # user confirmation, download the stable installer and run it in
    # --update mode. Silently returns on any lookup failure (offline-safe).
    local VERSION_URL="$REPO_MAIN/version.txt"
    local INSTALL_URL="$REPO_MAIN/install_proxmenux.sh"
    local INSTALL_SCRIPT="$BASE_DIR/install_proxmenux.sh"
    # No local version marker → nothing to compare against.
    [[ ! -f "$LOCAL_VERSION_FILE" ]] && return 0
    local REMOTE_VERSION LOCAL_VERSION
    REMOTE_VERSION="$(curl -fsSL "$VERSION_URL" 2>/dev/null | head -n 1)"
    [[ -z "$REMOTE_VERSION" ]] && return 0
    LOCAL_VERSION="$(head -n 1 "$LOCAL_VERSION_FILE" 2>/dev/null)"
    [[ -z "$LOCAL_VERSION" ]] && return 0
    # Plain string compare: any difference counts as "update available".
    [[ "$LOCAL_VERSION" = "$REMOTE_VERSION" ]] && return 0
    if whiptail --title "$(translate 'Update Available')" \
        --yesno "$(translate 'New version available') ($REMOTE_VERSION)\n\n$(translate 'Do you want to update now?')" \
        10 60 --defaultno; then
        msg_warn "$(translate 'Starting ProxMenux update...')"
        if curl -fsSL "$INSTALL_URL" -o "$INSTALL_SCRIPT"; then
            chmod +x "$INSTALL_SCRIPT"
            bash "$INSTALL_SCRIPT" --update
            return 0
        fi
    fi
}
# ── Beta update check (develop branch) ────────────────────
check_updates_beta() {
    # Update flow for installs enrolled in the beta program:
    #   1) if main now ships a stable version newer than the local beta,
    #      offer to migrate off the beta entirely;
    #   2) otherwise, offer any newer beta build from the develop branch.
    local BETA_VERSION_URL="$REPO_DEVELOP/beta_version.txt"
    local STABLE_VERSION_URL="$REPO_MAIN/version.txt"
    local INSTALL_BETA_URL="$REPO_DEVELOP/install_proxmenux_beta.sh"
    local INSTALL_STABLE_URL="$REPO_MAIN/install_proxmenux.sh"
    local INSTALL_SCRIPT="$BASE_DIR/install_proxmenux_beta.sh"
    # ── 1. Check if a stable release has superseded the beta ──
    # If main's version.txt exists and is newer than local beta_version.txt,
    # the beta cycle is over and we invite the user to switch to stable.
    local STABLE_VERSION BETA_LOCAL_VERSION
    STABLE_VERSION="$(curl -fsSL "$STABLE_VERSION_URL" 2>/dev/null | head -n 1)"
    BETA_LOCAL_VERSION="$(head -n 1 "$BETA_VERSION_FILE" 2>/dev/null)"
    if [[ -n "$STABLE_VERSION" && -n "$BETA_LOCAL_VERSION" ]]; then
        # Simple string comparison is enough if versions follow semver x.y.z.
        # sort -V | tail -1 picks the higher version, so this branch fires
        # only when stable is strictly newer than the installed beta.
        if [[ "$STABLE_VERSION" != "$BETA_LOCAL_VERSION" ]] && \
           printf '%s\n' "$BETA_LOCAL_VERSION" "$STABLE_VERSION" | sort -V | tail -1 | grep -qx "$STABLE_VERSION"; then
            # Stable is newer — offer migration out of beta
            if whiptail --title "🎉 Stable Release Available" \
                --yesno "A stable release of ProxMenux is now available!\n\nStable version : $STABLE_VERSION\nYour beta : $BETA_LOCAL_VERSION\n\nThe beta program for this cycle is complete.\nWould you like to switch to the stable release now?\n\n(Choosing 'No' keeps you on the beta for now.)" \
                16 68; then
                msg_warn "Switching to stable release $STABLE_VERSION ..."
                # Run the stable installer from /tmp ($$ keeps the name
                # unique) so it can freely replace the beta files.
                local tmp_installer="/tmp/install_proxmenux_stable_$$.sh"
                if curl -fsSL "$INSTALL_STABLE_URL" -o "$tmp_installer"; then
                    chmod +x "$tmp_installer"
                    bash "$tmp_installer"
                    rm -f "$tmp_installer"
                else
                    msg_error "Could not download the stable installer. Try manually:"
                    echo
                    echo " bash -c \"\$(wget -qLO - $INSTALL_STABLE_URL)\""
                    echo
                fi
                return 0
            fi
            # User chose to stay on beta — continue normally
            return 0
        fi
    fi
    # ── 2. Check for a newer beta build on develop ─────────────
    # NOTE(review): BETA_LOCAL_VERSION was already read above; this
    # existence check is redundant when the file is present, but it still
    # guards the comparison below when the marker file is missing.
    [[ ! -f "$BETA_VERSION_FILE" ]] && return 0
    local REMOTE_BETA_VERSION
    REMOTE_BETA_VERSION="$(curl -fsSL "$BETA_VERSION_URL" 2>/dev/null | head -n 1)"
    [[ -z "$REMOTE_BETA_VERSION" ]] && return 0
    [[ "$BETA_LOCAL_VERSION" = "$REMOTE_BETA_VERSION" ]] && return 0
    if whiptail --title "Beta Update Available" \
        --yesno "A new beta build is available!\n\nInstalled beta : $BETA_LOCAL_VERSION\nNew beta build : $REMOTE_BETA_VERSION\n\nThis is a pre-release build from the develop branch.\nDo you want to update now?" \
        13 64 --defaultno; then
        msg_warn "Updating to beta build $REMOTE_BETA_VERSION ..."
        if curl -fsSL "$INSTALL_BETA_URL" -o "$INSTALL_SCRIPT"; then
            chmod +x "$INSTALL_SCRIPT"
            bash "$INSTALL_SCRIPT" --update
        else
            msg_error "Could not download the beta installer from the develop branch."
        fi
    fi
}
# ── Main ───────────────────────────────────────────────────
main_menu() {
    # Hand control to the installed main-menu script; exec replaces this
    # process so no extra shell lingers behind the menu.
    exec bash "$LOCAL_SCRIPTS/menus/main_menu.sh"
}
+254
View File
@@ -0,0 +1,254 @@
{
"version": "1.0.0",
"last_updated": "2025-01-15T10:00:00Z",
"apps": {
"secure-gateway": {
"id": "secure-gateway",
"name": "Secure Gateway",
"short_name": "VPN Gateway",
"subtitle": "Tailscale VPN Gateway",
"version": "1.0.0",
"category": "security",
"subcategory": "remote_access",
"icon": "shield-check",
"icon_type": "shield",
"color": "#0EA5E9",
"summary": "Secure remote access without opening ports",
"description": "Deploy a managed VPN gateway using Tailscale for zero-trust access to your Proxmox infrastructure. Access ProxMenux Monitor, Proxmox UI, VMs, and LXC containers from anywhere without exposing ports to the internet.",
"documentation_url": "https://macrimi.github.io/ProxMenux/docs/secure-gateway",
"code_url": "https://github.com/MacRimi/ProxMenux/tree/main/Scripts/oci",
"features": [
"Zero-trust network access",
"No port forwarding required",
"End-to-end encryption",
"Easy mobile access",
"MagicDNS for easy hostname access",
"Access control via Tailscale admin"
],
"container": {
"type": "lxc",
"template": "alpine",
"install_method": "apk",
"packages": ["tailscale"],
"services": ["tailscale"],
"privileged": false,
"memory": 256,
"cores": 1,
"disk_size": 2,
"requires_ip_forward": true,
"features": ["nesting=1"],
"lxc_config": [
"lxc.cgroup2.devices.allow: c 10:200 rwm",
"lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file"
]
},
"volumes": {
"state": {
"container_path": "/var/lib/tailscale",
"persistent": true,
"description": "Tailscale state and keys"
}
},
"environment": [
{
"name": "TS_STATE_DIR",
"value": "/var/lib/tailscale"
},
{
"name": "TS_USERSPACE",
"value": "false"
},
{
"name": "TS_AUTHKEY",
"value": "$auth_key"
},
{
"name": "TS_HOSTNAME",
"value": "$hostname"
},
{
"name": "TS_ROUTES",
"value": "$advertise_routes"
},
{
"name": "TS_EXTRA_ARGS",
"value": "$extra_args"
}
],
"config_schema": {
"auth_key": {
"type": "password",
"label": "Tailscale Auth Key",
"description": "Pre-authentication key from Tailscale admin console. Generate one at the link below.",
"placeholder": "tskey-auth-xxxxx",
"required": true,
"sensitive": true,
"env_var": "TS_AUTHKEY",
"help_url": "https://login.tailscale.com/admin/settings/keys",
"help_text": "Generate Auth Key"
},
"hostname": {
"type": "text",
"label": "Device Hostname",
"description": "Name shown in Tailscale admin console",
"placeholder": "proxmox-gateway",
"default": "proxmox-gateway",
"required": false,
"env_var": "TS_HOSTNAME",
"validation": {
"pattern": "^[a-zA-Z0-9-]+$",
"max_length": 63,
"message": "Only letters, numbers, and hyphens allowed"
}
},
"access_mode": {
"type": "select",
"label": "Access Scope",
"description": "What should be accessible through this gateway",
"default": "host_only",
"required": true,
"options": [
{
"value": "host_only",
"label": "Proxmox Only",
"description": "Access only this Proxmox server (UI and ProxMenux Monitor)"
},
{
"value": "proxmox_network",
"label": "Full Local Network",
"description": "Access all devices on your local network (NAS, printers, VMs, etc.)"
},
{
"value": "custom",
"label": "Custom Subnets",
"description": "Select specific subnets to expose"
}
]
},
"advertise_routes": {
"type": "networks",
"label": "Advertised Networks",
"description": "Select networks to make accessible through the VPN",
"required": false,
"depends_on": {
"field": "access_mode",
"values": ["custom"]
},
"env_var": "TS_ROUTES",
"env_format": "csv"
},
"exit_node": {
"type": "boolean",
"label": "Exit Node",
"description": "Use this gateway as your internet exit point when away from home. All your internet traffic will appear to come from your Proxmox server's IP address.",
"default": false,
"required": false,
"flag": "--advertise-exit-node",
"warning": "Requires approval in Tailscale Admin. When enabled on your device, ALL internet traffic routes through your Proxmox server."
},
"accept_routes": {
"type": "boolean",
"label": "Accept Routes",
"description": "Allow this gateway to access networks advertised by OTHER Tailscale nodes in your tailnet. Useful if you have multiple Tailscale subnet routers.",
"default": false,
"required": false,
"flag": "--accept-routes"
}
},
"healthcheck": {
"command": ["tailscale", "status", "--json"],
"interval_seconds": 30,
"timeout_seconds": 10,
"retries": 3,
"healthy_condition": "BackendState == Running"
},
"requirements": {
"min_memory_mb": 64,
"min_disk_mb": 100,
"proxmox_min_version": "9.1",
"checks": [
{
"type": "proxmox_version",
"min": "9.1",
"message": "OCI containers require Proxmox VE 9.1+"
}
]
},
"security_notes": [
"Requires NET_ADMIN capability for VPN tunneling",
"Uses /dev/net/tun for network virtualization",
"Auth key is stored encrypted at rest",
"No ports are opened on the host firewall",
"All traffic is end-to-end encrypted"
],
"ui": {
"wizard_steps": [
{
"id": "intro",
"title": "Secure Remote Access",
"description": "Set up secure VPN access to your Proxmox server"
},
{
"id": "auth",
"title": "Tailscale Authentication",
"description": "Connect to your Tailscale account",
"fields": ["auth_key", "hostname"]
},
{
"id": "access",
"title": "Access Scope",
"description": "Choose what to make accessible",
"fields": ["access_mode", "advertise_routes"]
},
{
"id": "options",
"title": "Advanced Options",
"description": "Additional configuration",
"fields": ["exit_node", "accept_routes"]
},
{
"id": "deploy",
"title": "Deploy Gateway",
"description": "Review and deploy"
}
],
"show_in_sections": ["security"],
"dashboard_widget": false,
"status_indicators": {
"running": {
"color": "green",
"icon": "check-circle",
"label": "Connected"
},
"stopped": {
"color": "yellow",
"icon": "pause-circle",
"label": "Stopped"
},
"error": {
"color": "red",
"icon": "x-circle",
"label": "Error"
}
}
},
"metadata": {
"author": "ProxMenux",
"license": "MIT",
"upstream": "https://tailscale.com",
"tags": ["vpn", "remote-access", "tailscale", "zero-trust", "security"]
}
}
}
}
-861
View File
@@ -1,861 +0,0 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Complete Post-Installation Script with Registration
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : (GPL-3.0) (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : 1.0
# Last Updated: 06/07/2025
# ==========================================================
# Description:
#
# The script performs system optimizations including:
# - Repository configuration and system upgrades
# - Subscription banner removal and UI enhancements
# - Advanced memory management and kernel optimizations
# - Network stack tuning and security hardening
# - Storage optimizations including log2ram for SSD protection
# - System limits increases and entropy generation improvements
# - Journald and logrotate optimizations for better log management
# - Security enhancements including RPC disabling and time synchronization
# - Bash environment customization and system monitoring setup
#
# Key Features:
# - Zero-interaction automation: Runs completely unattended
# - Intelligent hardware detection: Automatically detects SSD/NVMe for log2ram
# - RAM-aware configurations: Adjusts settings based on available system memory
# - Comprehensive error handling: Robust installation with fallback mechanisms
# - Registration system: Tracks installed optimizations for easy management
# - Reboot management: Intelligently handles reboot requirements
# - Translation support: Multi-language compatible through ProxMenux framework
# - Rollback compatibility: All optimizations can be reversed using the uninstall script
#
# This script is based on the post-install script customizable
# ==========================================================
# Configuration
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"   # shared menu/tool scripts
BASE_DIR="/usr/local/share/proxmenux"                # install root
UTILS_FILE="$BASE_DIR/utils.sh"                      # helper library (presumably defines msg_*/translate used below — confirm)
VENV_PATH="/opt/googletrans-env"                     # translation virtualenv path
TOOLS_JSON="/usr/local/share/proxmenux/installed_tools.json"  # registry written by register_tool
if [[ -f "$UTILS_FILE" ]]; then
    source "$UTILS_FILE"
fi
load_language
initialize_cache
# Global variables
OS_CODENAME="$(grep "VERSION_CODENAME=" /etc/os-release | cut -d"=" -f 2 | xargs)"
# vmstat "total memory" is in KiB; dividing by 1024*1000 gives an
# approximate GB figure — TODO confirm the mixed 1024/1000 divisor is intentional.
RAM_SIZE_GB=$(( $(vmstat -s | grep -i "total memory" | xargs | cut -d" " -f 1) / 1024 / 1000))
NECESSARY_REBOOT=0   # flipped to 1 by optimizations that need a restart
SCRIPT_TITLE="Customizable post-installation optimization script"
# ==========================================================
# Tool registration system
ensure_tools_json() {
    # Create the tool-registry file with an empty JSON object on first use;
    # existing content is left untouched.
    if [ ! -f "$TOOLS_JSON" ]; then
        echo "{}" > "$TOOLS_JSON"
    fi
}
register_tool() {
    # Record a tool's state in the registry file.
    #   $1 = tool name, $2 = JSON value for its state (e.g. true/false)
    local name="$1" value="$2"
    ensure_tools_json
    # Write via a temp file so the registry is never left half-written.
    jq --arg t "$name" --argjson v "$value" '.[$t]=$v' "$TOOLS_JSON" > "$TOOLS_JSON.tmp" \
        && mv "$TOOLS_JSON.tmp" "$TOOLS_JSON"
}
# ==========================================================
lvm_repair_check() {
    # Scan for LVM physical volumes whose 'pvs' output warns about an old
    # PV header, and refresh the metadata of every affected volume group.
    msg_info "$(translate "Checking and repairing old LVM PV headers (if needed)...")"
    # LC_ALL=C pins the English warning text we grep for.
    pvs_output=$(LC_ALL=C pvs -v 2>&1 | grep "old PV header")
    if [ -z "$pvs_output" ]; then
        msg_ok "$(translate "No PVs with old headers found.")"
        register_tool "lvm_repair" true
        return
    fi
    # Collect the affected VGs, deduplicated via an associative array.
    declare -A vg_map
    while read -r line; do
        pv=$(echo "$line" | grep -o '/dev/[^ ]*')
        vg=$(pvs -o vg_name --noheadings "$pv" | awk '{print $1}')
        if [ -n "$vg" ]; then
            vg_map["$vg"]=1
        fi
    done <<< "$pvs_output"
    # Rewrite metadata and reactivate each affected VG; failures are only
    # warned about, not fatal.
    for vg in "${!vg_map[@]}"; do
        msg_warn "$(translate "Old PV header(s) found in VG $vg. Updating metadata...")"
        vgck --updatemetadata "$vg"
        vgchange -ay "$vg"
        if [ $? -ne 0 ]; then
            msg_warn "$(translate "Metadata update failed for VG $vg. Review manually.")"
        else
            msg_ok "$(translate "Metadata updated successfully for VG $vg")"
        fi
    done
    msg_ok "$(translate "LVM PV headers check completed")"
    register_tool "lvm_repair" true
}
# ==========================================================
cleanup_duplicate_repos() {
    # De-duplicate APT sources: comment out repeated 'deb' lines in
    # /etc/apt/sources.list (same URL + distribution), then make sure only
    # one file under sources.list.d provides the pve-no-subscription repo.
    local sources_file="/etc/apt/sources.list"
    local temp_file=$(mktemp)
    local cleaned_count=0
    declare -A seen_repos
    while IFS= read -r line || [[ -n "$line" ]]; do
        # Pass comments and blank lines through untouched.
        if [[ "$line" =~ ^[[:space:]]*# ]] || [[ -z "$line" ]]; then
            echo "$line" >> "$temp_file"
            continue
        fi
        if [[ "$line" =~ ^deb ]]; then
            read -r _ url dist components <<< "$line"
            # Key on URL + distribution: a second occurrence is treated as a
            # duplicate regardless of its component list.
            local key="${url}_${dist}"
            if [[ -v "seen_repos[$key]" ]]; then
                echo "# $line" >> "$temp_file"
                cleaned_count=$((cleaned_count + 1))
            else
                echo "$line" >> "$temp_file"
                seen_repos[$key]="$components"
            fi
        else
            echo "$line" >> "$temp_file"
        fi
    done < "$sources_file"
    mv "$temp_file" "$sources_file"
    chmod 644 "$sources_file"
    # Prefer pve-public-repo.list as the single pve-no-subscription source;
    # comment the entry out of any other proxmox/pve list file.
    local pve_files=(/etc/apt/sources.list.d/*proxmox*.list /etc/apt/sources.list.d/*pve*.list)
    # NOTE(review): pve_content is assigned but never used below — kept as-is.
    local pve_content="deb http://download.proxmox.com/debian/pve ${OS_CODENAME} pve-no-subscription"
    local pve_public_repo="/etc/apt/sources.list.d/pve-public-repo.list"
    local pve_public_repo_exists=false
    if [ -f "$pve_public_repo" ] && grep -q "^deb.*pve-no-subscription" "$pve_public_repo"; then
        pve_public_repo_exists=true
    fi
    for file in "${pve_files[@]}"; do
        if [ -f "$file" ] && grep -q "^deb.*pve-no-subscription" "$file"; then
            if ! $pve_public_repo_exists && [[ "$file" == "$pve_public_repo" ]]; then
                # Re-enable a commented-out entry in the preferred file.
                sed -i 's/^# *deb/deb/' "$file"
                pve_public_repo_exists=true
            elif [[ "$file" != "$pve_public_repo" ]]; then
                sed -i 's/^deb/# deb/' "$file"
                cleaned_count=$((cleaned_count + 1))
            fi
        fi
    done
    apt update
}
apt_upgrade() {
    # Configure Proxmox/Debian repositories, run a full dist-upgrade with a
    # cosmetic progress panel, install extra Proxmox packages, then run the
    # LVM and repo cleanup helpers. Always flags a reboot.
    NECESSARY_REBOOT=1
    # Disable the subscription-only repositories (enterprise + Ceph)...
    if [ -f /etc/apt/sources.list.d/pve-enterprise.list ] && grep -q "^deb" /etc/apt/sources.list.d/pve-enterprise.list; then
        msg_info "$(translate "Disabling enterprise Proxmox repository...")"
        sed -i "s/^deb/#deb/g" /etc/apt/sources.list.d/pve-enterprise.list
        msg_ok "$(translate "Enterprise Proxmox repository disabled")"
    fi
    if [ -f /etc/apt/sources.list.d/ceph.list ] && grep -q "^deb" /etc/apt/sources.list.d/ceph.list; then
        msg_info "$(translate "Disabling enterprise Proxmox Ceph repository...")"
        sed -i "s/^deb/#deb/g" /etc/apt/sources.list.d/ceph.list
        msg_ok "$(translate "Enterprise Proxmox Ceph repository disabled")"
    fi
    # ...and enable the free pve-no-subscription repository.
    if [ ! -f /etc/apt/sources.list.d/pve-public-repo.list ] || ! grep -q "pve-no-subscription" /etc/apt/sources.list.d/pve-public-repo.list; then
        msg_info "$(translate "Enabling free public Proxmox repository...")"
        echo "deb http://download.proxmox.com/debian/pve ${OS_CODENAME} pve-no-subscription" > /etc/apt/sources.list.d/pve-public-repo.list
        msg_ok "$(translate "Free public Proxmox repository enabled")"
    fi
    # Normalize Debian sources: country mirror → deb.debian.org, and ensure
    # security/base/updates entries exist with non-free firmware components.
    sources_file="/etc/apt/sources.list"
    need_update=false
    sed -i 's|ftp.es.debian.org|deb.debian.org|g' "$sources_file"
    if grep -q "^deb http://security.debian.org ${OS_CODENAME}-security main contrib" "$sources_file"; then
        sed -i "s|^deb http://security.debian.org ${OS_CODENAME}-security main contrib|deb http://security.debian.org/debian-security ${OS_CODENAME}-security main contrib non-free non-free-firmware|" "$sources_file"
        msg_ok "$(translate "Replaced security repository with full version")"
        need_update=true
    fi
    if ! grep -q "deb http://security.debian.org/debian-security ${OS_CODENAME}-security" "$sources_file"; then
        echo "deb http://security.debian.org/debian-security ${OS_CODENAME}-security main contrib non-free non-free-firmware" >> "$sources_file"
        need_update=true
    fi
    if ! grep -q "deb http://deb.debian.org/debian ${OS_CODENAME} " "$sources_file"; then
        echo "deb http://deb.debian.org/debian ${OS_CODENAME} main contrib non-free non-free-firmware" >> "$sources_file"
        need_update=true
    fi
    if ! grep -q "deb http://deb.debian.org/debian ${OS_CODENAME}-updates" "$sources_file"; then
        echo "deb http://deb.debian.org/debian ${OS_CODENAME}-updates main contrib non-free non-free-firmware" >> "$sources_file"
        need_update=true
    fi
    msg_ok "$(translate "Debian repositories configured correctly")"
    # ===================================================
    if [ ! -f /etc/apt/apt.conf.d/no-bookworm-firmware.conf ]; then
        msg_info "$(translate "Disabling non-free firmware warnings...")"
        echo 'APT::Get::Update::SourceListWarnings::NonFreeFirmware "false";' > /etc/apt/apt.conf.d/no-bookworm-firmware.conf
        msg_ok "$(translate "Non-free firmware warnings disabled")"
    fi
    msg_info "$(translate "Updating package lists...")"
    if apt-get update > /dev/null 2>&1; then
        msg_ok "$(translate "Package lists updated")"
    else
        msg_error "$(translate "Failed to update package lists")"
        return 1
    fi
    # Remove NTP daemons that conflict with chrony (installed further down).
    msg_info "$(translate "Removing conflicting utilities...")"
    if /usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' purge ntp openntpd systemd-timesyncd > /dev/null 2>&1; then
        msg_ok "$(translate "Conflicting utilities removed")"
    else
        msg_error "$(translate "Failed to remove conflicting utilities")"
    fi
    msg_info "$(translate "Performing packages upgrade...")"
    apt-get install pv -y > /dev/null 2>&1
    # NOTE(review): total_packages is computed (clamped to >= 1) but not
    # referenced by the display below — confirm whether it is still needed.
    total_packages=$(apt-get -s dist-upgrade | grep "^Inst" | wc -l)
    if [ "$total_packages" -eq 0 ]; then
        total_packages=1
    fi
    msg_ok "$(translate "Packages upgrade successful")"
    tput civis   # hide the cursor while drawing the progress panel
    tput sc      # save the cursor position for redraws
    # Cosmetic progress panel: parses dpkg output and redraws a per-package
    # bar near the bottom of the terminal. NOTE(review): the inner for-loop
    # draws 0→100% in one burst, so the bar is decorative, not a real
    # progress measure.
    (
        /usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' dist-upgrade 2>&1 | \
        while IFS= read -r line; do
            if [[ "$line" =~ ^(Setting up|Unpacking|Preparing to unpack|Processing triggers for) ]]; then
                package_name=$(echo "$line" | sed -E 's/.*(Setting up|Unpacking|Preparing to unpack|Processing triggers for) ([^ ]+).*/\2/')
                [ -z "$package_name" ] && package_name="$(translate "Unknown")"
                tput rc
                tput ed
                row=$(( $(tput lines) - 6 ))
                tput cup $row 0; echo "$(translate "Installing packages...")"
                tput cup $((row + 1)) 0; echo "──────────────────────────────────────────────"
                tput cup $((row + 2)) 0; echo "Package: $package_name"
                tput cup $((row + 3)) 0; echo "Progress: [ ] 0%"
                tput cup $((row + 4)) 0; echo "──────────────────────────────────────────────"
                for i in $(seq 1 10); do
                    progress=$((i * 10))
                    tput cup $((row + 3)) 9
                    printf "[%-50s] %3d%%" "$(printf "#%.0s" $(seq 1 $((progress/2))))" "$progress"
                done
            fi
        done
    )
    # NOTE(review): $? here reflects the subshell pipeline (the while loop),
    # not apt-get's own exit status — confirm before relying on it.
    if [ $? -eq 0 ]; then
        tput rc
        tput ed
        msg_ok "$(translate "System upgrade completed")"
    fi
    msg_info "$(translate "Installing additional Proxmox packages...")"
    if /usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' install zfsutils-linux proxmox-backup-restore-image chrony > /dev/null 2>&1; then
        msg_ok "$(translate "Additional Proxmox packages installed")"
    else
        msg_error "$(translate "Failed to install additional Proxmox packages")"
    fi
    lvm_repair_check
    cleanup_duplicate_repos
    msg_ok "$(translate "Proxmox update completed")"
}
# ==========================================================
remove_subscription_banner() {
    # Patch the Proxmox web UI so the "no valid subscription" dialog stops
    # appearing, and install an APT hook that re-applies the patch whenever
    # proxmox-widget-toolkit is upgraded.
    msg_info "$(translate "Removing Proxmox subscription nag banner...")"
    local JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
    local GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
    local APT_HOOK="/etc/apt/apt.conf.d/no-nag-script"
    # Quoted heredoc ('EOF'): the hook content is written verbatim, no
    # shell expansion happens here.
    if [[ ! -f "$APT_HOOK" ]]; then
        cat <<'EOF' > "$APT_HOOK"
DPkg::Post-Invoke { "dpkg -V proxmox-widget-toolkit | grep -q '/proxmoxlib\.js$'; if [ $? -eq 1 ]; then { echo 'Removing subscription nag from UI...'; sed -i '/.*data\.status.*{/{s/\!//;s/active/NoMoreNagging/;s/Active/NoMoreNagging/}' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js; rm -f /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz; }; fi"; };
EOF
    fi
    # Apply the same substitution now and drop the pre-compressed copy so
    # the patched JS is the version actually served.
    if [[ -f "$JS_FILE" ]]; then
        sed -i '/.*data\.status.*{/{s/\!//;s/active/NoMoreNagging/;s/Active/NoMoreNagging/}' "$JS_FILE"
        [[ -f "$GZ_FILE" ]] && rm -f "$GZ_FILE"
        touch "$JS_FILE"
    fi
    apt --reinstall install proxmox-widget-toolkit -y > /dev/null 2>&1
    msg_ok "$(translate "Subscription nag banner removed successfully")"
    register_tool "subscription_banner" true
}
# ==========================================================
configure_time_sync() {
    # Detect the host's timezone from its public IP, apply it, and enable
    # NTP synchronization. Falls back to UTC when either the public-IP or
    # the timezone lookup fails.
    msg_info "$(translate "Configuring system time settings...")"
    this_ip=$(dig +short myip.opendns.com @resolver1.opendns.com)
    if [ -z "$this_ip" ]; then
        msg_warn "$(translate "Failed to obtain public IP address")"
        timezone="UTC"
    else
        timezone=$(curl -s "https://ipapi.co/${this_ip}/timezone")
        if [ -z "$timezone" ]; then
            msg_warn "$(translate "Failed to determine timezone from IP address")"
            timezone="UTC"
        else
            msg_ok "$(translate "Found timezone $timezone for IP $this_ip")"
        fi
    fi
    # FIX: the detected timezone was reported but never applied — actually
    # set it (best-effort: an invalid zone just leaves the current one).
    timedatectl set-timezone "$timezone" 2>/dev/null || \
        msg_warn "$(translate "Failed to set timezone")"
    msg_info "$(translate "Enabling automatic time synchronization...")"
    if timedatectl set-ntp true; then
        msg_ok "$(translate "Time settings configured - Timezone:") $timezone"
        register_tool "time_sync" true
    else
        msg_error "$(translate "Failed to enable automatic time synchronization")"
    fi
}
# ==========================================================
skip_apt_languages() {
    # Stop APT from downloading translation files, after making sure the
    # system's default locale is generated so localized tools keep working.
    msg_info "$(translate "Configuring APT to skip downloading additional languages...")"
    # Pick the default locale from the first existing candidate file.
    local sys_locale=""
    local src
    for src in /etc/default/locale /etc/environment; do
        if [ -f "$src" ]; then
            sys_locale=$(grep '^LANG=' "$src" | cut -d= -f2 | tr -d '"')
            break
        fi
    done
    sys_locale="${sys_locale:-en_US.UTF-8}"
    # 'locale -a' prints names like "en_us.utf8"; normalize for comparison.
    local canon
    canon=$(echo "$sys_locale" | tr 'A-Z' 'a-z' | sed 's/utf-8/utf8/;s/-/_/')
    if ! locale -a | grep -qi "^$canon$"; then
        grep -qE "^${sys_locale}[[:space:]]+UTF-8" /etc/locale.gen \
            || echo "$sys_locale UTF-8" >> /etc/locale.gen
        locale-gen "$sys_locale" > /dev/null 2>&1
    fi
    echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99-disable-translations
    msg_ok "$(translate "APT configured to skip additional languages")"
    register_tool "apt_languages" true
}
# ==========================================================
optimize_journald() {
    # Cap journald's disk/RAM usage (64M/60M) and raise the stored log
    # level to warnings; overwrites /etc/systemd/journald.conf wholesale.
    # Flags a reboot for the settings to fully apply.
    msg_info "$(translate "Limiting size and optimizing journald...")"
    NECESSARY_REBOOT=1
    cat <<EOF > /etc/systemd/journald.conf
[Journal]
Storage=persistent
SplitMode=none
RateLimitInterval=0
RateLimitIntervalSec=0
RateLimitBurst=0
ForwardToSyslog=no
ForwardToWall=yes
Seal=no
Compress=yes
SystemMaxUse=64M
RuntimeMaxUse=60M
MaxLevelStore=warning
MaxLevelSyslog=warning
MaxLevelKMsg=warning
MaxLevelConsole=notice
MaxLevelWall=crit
EOF
    # Restart to apply, then shrink and rotate existing journals right away.
    systemctl restart systemd-journald.service > /dev/null 2>&1
    journalctl --vacuum-size=64M --vacuum-time=1d > /dev/null 2>&1
    journalctl --rotate > /dev/null 2>&1
    msg_ok "$(translate "Journald optimized - Max size: 64M")"
    register_tool "journald" true
}
# ==========================================================
optimize_logrotate() {
    # Replace /etc/logrotate.conf with a daily, 7-rotation, compressed
    # policy, keeping a .bak copy of the original. Idempotent: skipped when
    # the ProxMenux marker comment is already present.
    msg_info "$(translate "Optimizing logrotate configuration...")"
    local logrotate_conf="/etc/logrotate.conf"
    local backup_conf="${logrotate_conf}.bak"
    if ! grep -q "# ProxMenux optimized configuration" "$logrotate_conf"; then
        cp "$logrotate_conf" "$backup_conf"
        cat <<EOF > "$logrotate_conf"
# ProxMenux optimized configuration
daily
su root adm
rotate 7
create
compress
size=10M
delaycompress
copytruncate
include /etc/logrotate.d
EOF
        systemctl restart logrotate > /dev/null 2>&1
    fi
    msg_ok "$(translate "Logrotate optimization completed")"
    register_tool "logrotate" true
}
# ==========================================================
# Raise inotify, nproc/nofile, kernel-keyring, swap and file-handle limits
# via sysctl.d, limits.d, systemd and PAM drop-ins. Reboot required.
increase_system_limits() {
msg_info "$(translate "Increasing various system limits...")"
NECESSARY_REBOOT=1
# inotify watch/instance/queue limits (file-watcher heavy workloads).
cat > /etc/sysctl.d/99-maxwatches.conf << EOF
# ProxMenux configuration
fs.inotify.max_user_watches = 1048576
fs.inotify.max_user_instances = 1048576
fs.inotify.max_queued_events = 1048576
EOF
# Per-user process and open-file limits; root unlimited.
cat > /etc/security/limits.d/99-limits.conf << EOF
# ProxMenux configuration
* soft nproc 1048576
* hard nproc 1048576
* soft nofile 1048576
* hard nofile 1048576
root soft nproc unlimited
root hard nproc unlimited
root soft nofile unlimited
root hard nofile unlimited
EOF
# Kernel keyring ceilings (containers consume keys quickly).
cat > /etc/sysctl.d/99-maxkeys.conf << EOF
# ProxMenux configuration
kernel.keys.root_maxkeys=1000000
kernel.keys.maxkeys=1000000
EOF
# Raise the default open-file limit for all systemd-managed units.
for file in /etc/systemd/system.conf /etc/systemd/user.conf; do
if ! grep -q "^DefaultLimitNOFILE=" "$file"; then
echo "DefaultLimitNOFILE=256000" >> "$file"
fi
done
# Make PAM apply limits.d settings on session start.
# NOTE(review): /etc/pam.d/runuser-l may not exist on a fresh host; the
# grep then fails and the echo creates the file — confirm this is intended.
for file in /etc/pam.d/common-session /etc/pam.d/runuser-l; do
if ! grep -q "^session required pam_limits.so" "$file"; then
echo 'session required pam_limits.so' >> "$file"
fi
done
if ! grep -q "ulimit -n 256000" /root/.profile; then
echo "ulimit -n 256000" >> /root/.profile
fi
# Prefer RAM over swap; keep default inode/dentry cache pressure.
cat > /etc/sysctl.d/99-swap.conf << EOF
# ProxMenux configuration
vm.swappiness = 10
vm.vfs_cache_pressure = 100
EOF
# System-wide file handle and async-IO ceilings.
cat > /etc/sysctl.d/99-fs.conf << EOF
# ProxMenux configuration
fs.nr_open = 12000000
fs.file-max = 9223372036854775807
fs.aio-max-nr = 1048576
EOF
msg_ok "$(translate "System limits increase completed.")"
register_tool "system_limits" true
}
# ==========================================================
# Install and enable haveged so the kernel entropy pool stays filled
# (prevents boot/TLS stalls on headless hosts).
configure_entropy() {
msg_info "$(translate "Configuring entropy generation to prevent slowdowns...")"
# --force-confdef keeps existing config files during unattended install.
/usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' install haveged > /dev/null 2>&1
cat <<EOF > /etc/default/haveged
# -w sets low entropy watermark (in bits)
DAEMON_ARGS="-w 1024"
EOF
systemctl daemon-reload > /dev/null 2>&1
systemctl enable haveged > /dev/null 2>&1
msg_ok "$(translate "Entropy generation configuration completed")"
register_tool "entropy" true
}
# ==========================================================
# Write balanced VM/memory sysctls; flagged as requiring a reboot so the
# values are active from early boot.
optimize_memory_settings() {
msg_info "$(translate "Optimizing memory settings...")"
NECESSARY_REBOOT=1
cat <<EOF > /etc/sysctl.d/99-memory.conf
# Balanced Memory Optimization
vm.swappiness = 10
vm.dirty_ratio = 15
vm.dirty_background_ratio = 5
vm.overcommit_memory = 1
vm.max_map_count = 65530
EOF
# compaction_proactiveness only exists on newer kernels; add it when present.
if [ -f /proc/sys/vm/compaction_proactiveness ]; then
echo "vm.compaction_proactiveness = 20" >> /etc/sysctl.d/99-memory.conf
fi
msg_ok "$(translate "Memory optimization completed.")"
register_tool "memory_settings" true
}
# ==========================================================
# Configure the kernel to auto-reboot 10s after a panic, and to panic on
# oops/hardlockup; core dumps are written under /var/crash.
configure_kernel_panic() {
    msg_info "$(translate "Configuring kernel panic behavior")"
    NECESSARY_REBOOT=1
    # Fix: core_pattern points into /var/crash, but nothing guaranteed the
    # directory existed — create it so core dumps are not silently lost.
    mkdir -p /var/crash
    cat <<EOF > /etc/sysctl.d/99-kernelpanic.conf
# Enable restart on kernel panic, kernel oops and hardlockup
kernel.core_pattern = /var/crash/core.%t.%p
kernel.panic = 10
kernel.panic_on_oops = 1
kernel.hardlockup_panic = 1
EOF
    msg_ok "$(translate "Kernel panic behavior configuration completed")"
    register_tool "kernel_panic" true
}
# ==========================================================
# Force APT to use IPv4, avoiding stalls on hosts with broken IPv6 routing.
force_apt_ipv4() {
    msg_info "$(translate "Configuring APT to use IPv4...")"
    printf 'Acquire::ForceIPv4 "true";\n' > /etc/apt/apt.conf.d/99-force-ipv4
    msg_ok "$(translate "APT IPv4 configuration completed")"
    register_tool "apt_ipv4" true
}
# ==========================================================
# Write hardened/tuned network sysctls, apply them immediately via
# `sysctl --system`, and ensure interfaces.d drop-ins are sourced.
apply_network_optimizations() {
msg_info "$(translate "Optimizing network settings...")"
NECESSARY_REBOOT=1
# NOTE(review): net.ipv4.tcp_tw_recycle was removed in Linux 4.12 and
# net.netfilter.nf_conntrack_helper in 6.0 — those entries are silently
# ignored on current kernels (sysctl errors are discarded below); confirm
# they are kept intentionally for older installs.
cat <<EOF > /etc/sysctl.d/99-network.conf
net.core.netdev_max_backlog=8192
net.core.optmem_max=8192
net.core.rmem_max=16777216
net.core.somaxconn=8151
net.core.wmem_max=16777216
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.all.log_martians = 0
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.default.log_martians = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.secure_redirects = 0
net.ipv4.conf.default.send_redirects = 0
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.icmp_ignore_bogus_error_responses = 1
net.ipv4.ip_local_port_range=1024 65535
net.ipv4.tcp_base_mss = 1024
net.ipv4.tcp_challenge_ack_limit = 999999999
net.ipv4.tcp_fin_timeout=10
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=3
net.ipv4.tcp_keepalive_time=240
net.ipv4.tcp_limit_output_bytes=65536
net.ipv4.tcp_max_syn_backlog=8192
net.ipv4.tcp_max_tw_buckets = 1440000
net.ipv4.tcp_mtu_probing = 1
net.ipv4.tcp_rfc1337=1
net.ipv4.tcp_rmem=8192 87380 16777216
net.ipv4.tcp_sack=1
net.ipv4.tcp_slow_start_after_idle=0
net.ipv4.tcp_syn_retries=3
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_wmem=8192 65536 16777216
net.netfilter.nf_conntrack_generic_timeout = 60
net.netfilter.nf_conntrack_helper=0
net.netfilter.nf_conntrack_max = 524288
net.netfilter.nf_conntrack_tcp_timeout_established = 28800
net.unix.max_dgram_qlen = 4096
EOF
sysctl --system > /dev/null 2>&1
# Make sure per-interface drop-in configs are honoured.
local interfaces_file="/etc/network/interfaces"
if ! grep -q 'source /etc/network/interfaces.d/*' "$interfaces_file"; then
echo "source /etc/network/interfaces.d/*" >> "$interfaces_file"
fi
msg_ok "$(translate "Network optimization completed")"
register_tool "network_optimization" true
}
# ==========================================================
# Disable and stop the rpcbind (portmapper) service to shrink the host's
# network attack surface. The package itself is NOT uninstalled.
disable_rpc() {
    msg_info "$(translate "Disabling portmapper/rpcbind for security...")"
    systemctl disable rpcbind > /dev/null 2>&1
    systemctl stop rpcbind > /dev/null 2>&1
    # Fix: the previous message claimed rpcbind was "disabled and removed",
    # but nothing is removed here — only disabled and stopped.
    msg_ok "$(translate "portmapper/rpcbind has been disabled")"
    register_tool "disable_rpc" true
}
# ==========================================================
# Append ProxMenux prompt/alias customizations to root's .bashrc (once)
# and make login shells source .bashrc via .bash_profile.
customize_bashrc() {
    msg_info "$(translate "Customizing bashrc for root user...")"
    local bashrc="/root/.bashrc"
    local bash_profile="/root/.bash_profile"
    # Keep a pristine copy the first time we touch the file.
    if [ ! -f "${bashrc}.bak" ]; then
        cp "$bashrc" "${bashrc}.bak"
    fi
    # Fix: the block was previously appended unconditionally, duplicating
    # the whole customization section every time the tool was re-run.
    if ! grep -q "# ProxMenux customizations" "$bashrc"; then
        cat >> "$bashrc" << 'EOF'
# ProxMenux customizations
export HISTTIMEFORMAT="%d/%m/%y %T "
export PS1="\[\e[31m\][\[\e[m\]\[\e[38;5;172m\]\u\[\e[m\]@\[\e[38;5;153m\]\h\[\e[m\] \[\e[38;5;214m\]\W\[\e[m\]\[\e[31m\]]\[\e[m\]\\$ "
alias l='ls -CF'
alias la='ls -A'
alias ll='ls -alF'
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
source /etc/profile.d/bash_completion.sh
EOF
    fi
    # Ensure login shells pick up .bashrc as well.
    if ! grep -q "source /root/.bashrc" "$bash_profile"; then
        echo "source /root/.bashrc" >> "$bash_profile"
    fi
    msg_ok "$(translate "Bashrc customization completed")"
    register_tool "bashrc_custom" true
}
# ==========================================================
# Install log2ram from GitHub when the system disk is an SSD/NVMe, size it
# from installed RAM, schedule periodic writes and a 90%-usage auto-sync.
# Skips (return 0) on rotational disks.
install_log2ram_auto() {
msg_info "$(translate "Checking if system disk is SSD or M.2...")"
# Resolve the block device backing "/"; fall back to sda if unknown.
# NOTE(review): on LVM/ZFS roots PKNAME may resolve to a dm-* device and
# the rotational check can misreport — verify on non-trivial layouts.
ROOT_PART=$(lsblk -no NAME,MOUNTPOINT | grep ' /$' | awk '{print $1}')
SYSTEM_DISK=$(lsblk -no PKNAME /dev/$ROOT_PART 2>/dev/null)
SYSTEM_DISK=${SYSTEM_DISK:-sda}
# NVMe devices or rotational=0 indicate flash storage.
if [[ "$SYSTEM_DISK" == nvme* || "$(cat /sys/block/$SYSTEM_DISK/queue/rotational 2>/dev/null)" == "0" ]]; then
msg_ok "$(translate "System disk ($SYSTEM_DISK) is SSD or M.2. Proceeding with log2ram setup.")"
else
msg_warn "$(translate "System disk ($SYSTEM_DISK) is not SSD/M.2. Skipping log2ram installation.")"
return 0
fi
# Clean up previous state
rm -rf /tmp/log2ram
rm -f /etc/systemd/system/log2ram*
rm -f /etc/systemd/system/log2ram-daily.*
rm -f /etc/cron.d/log2ram*
rm -f /usr/sbin/log2ram
rm -f /etc/log2ram.conf
rm -f /usr/local/bin/log2ram-check.sh
rm -rf /var/log.hdd
systemctl daemon-reexec >/dev/null 2>&1
systemctl daemon-reload >/dev/null 2>&1
msg_info "$(translate "Installing log2ram from GitHub...")"
git clone https://github.com/azlux/log2ram.git /tmp/log2ram >/dev/null 2>>/tmp/log2ram_install.log
cd /tmp/log2ram || return 1
bash install.sh >>/tmp/log2ram_install.log 2>&1
# Verify both the config file and the systemd unit exist after install.
if [[ -f /etc/log2ram.conf ]] && systemctl list-units --all | grep -q log2ram; then
msg_ok "$(translate "log2ram installed successfully")"
else
msg_error "$(translate "Failed to install log2ram. See /tmp/log2ram_install.log")"
return 1
fi
# Detect RAM (in MB first for better accuracy)
RAM_SIZE_MB=$(free -m | awk '/^Mem:/{print $2}')
RAM_SIZE_GB=$((RAM_SIZE_MB / 1024))
[[ -z "$RAM_SIZE_GB" || "$RAM_SIZE_GB" -eq 0 ]] && RAM_SIZE_GB=4
# Scale the tmpfs size and write interval with installed RAM.
if (( RAM_SIZE_GB <= 8 )); then
LOG2RAM_SIZE="128M"
CRON_HOURS=1
elif (( RAM_SIZE_GB <= 16 )); then
LOG2RAM_SIZE="256M"
CRON_HOURS=3
else
LOG2RAM_SIZE="512M"
CRON_HOURS=6
fi
msg_ok "$(translate "Detected RAM:") $RAM_SIZE_GB GB — $(translate "log2ram size set to:") $LOG2RAM_SIZE"
sed -i "s/^SIZE=.*/SIZE=$LOG2RAM_SIZE/" /etc/log2ram.conf
# Replace upstream's hourly cron with our own interval.
rm -f /etc/cron.hourly/log2ram
echo "0 */$CRON_HOURS * * * root /usr/sbin/log2ram write" > /etc/cron.d/log2ram
msg_ok "$(translate "log2ram write scheduled every") $CRON_HOURS $(translate "hour(s)")"
# Helper script: flush to disk early once /var/log exceeds 90% of SIZE.
cat << 'EOF' > /usr/local/bin/log2ram-check.sh
#!/bin/bash
CONF_FILE="/etc/log2ram.conf"
SIZE_VALUE=$(grep '^SIZE=' "$CONF_FILE" | cut -d'=' -f2)
# Convert to KB: handle M (megabytes) and G (gigabytes)
if [[ "$SIZE_VALUE" == *"G" ]]; then
LIMIT_KB=$(($(echo "$SIZE_VALUE" | tr -d 'G') * 1024 * 1024))
else
LIMIT_KB=$(($(echo "$SIZE_VALUE" | tr -d 'M') * 1024))
fi
USED_KB=$(df /var/log --output=used | tail -1)
THRESHOLD=$(( LIMIT_KB * 90 / 100 ))
if (( USED_KB > THRESHOLD )); then
/usr/sbin/log2ram write
fi
EOF
chmod +x /usr/local/bin/log2ram-check.sh
echo "*/5 * * * * root /usr/local/bin/log2ram-check.sh" > /etc/cron.d/log2ram-auto-sync
msg_ok "$(translate "Auto-sync enabled when /var/log exceeds 90% of") $LOG2RAM_SIZE"
register_tool "log2ram" true
}
# ==========================================================
# Remove leftover packages and purge the apt cache. Shared helper: the
# original duplicated these four lines in both reboot-dialog branches.
cleanup_apt_packages() {
    msg_info "$(translate "Removing no longer required packages and purging old cached updates...")"
    apt-get -y autoremove >/dev/null 2>&1
    apt-get -y autoclean >/dev/null 2>&1
    msg_ok "$(translate "Cleanup finished")"
}
# Run every post-install optimization in sequence, then offer a reboot
# when any step set NECESSARY_REBOOT=1.
run_complete_optimization() {
    clear
    show_proxmenux_logo
    msg_title "$(translate "ProxMenux Optimization Post-Installation")"
    ensure_tools_json
    apt_upgrade
    remove_subscription_banner
    configure_time_sync
    skip_apt_languages
    optimize_journald
    optimize_logrotate
    increase_system_limits
    configure_entropy
    optimize_memory_settings
    configure_kernel_panic
    force_apt_ipv4
    apply_network_optimizations
    disable_rpc
    customize_bashrc
    install_log2ram_auto
    echo -e
    msg_success "$(translate "Complete post-installation optimization finished!")"
    if [[ "$NECESSARY_REBOOT" -eq 1 ]]; then
        if whiptail --title "Reboot Required" \
            --yesno "$(translate "Some changes require a reboot to take effect. Do you want to restart now?")" 10 60; then
            cleanup_apt_packages
            msg_success "$(translate "Press Enter to continue...")"
            read -r
            msg_warn "$(translate "Rebooting the system...")"
            reboot
        else
            cleanup_apt_packages
            msg_info2 "$(translate "You can reboot later manually.")"
            msg_success "$(translate "Press Enter to continue...")"
            read -r
            exit 0
        fi
    fi
    msg_success "$(translate "All changes applied. No reboot required.")"
    msg_success "$(translate "Press Enter to return to menu...")"
    read -r
    clear
}
# Execute the full optimization only when this script is run directly;
# sourcing the file merely loads the functions.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
    run_complete_optimization
fi
@@ -0,0 +1,166 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Apply Pending Restore On Boot
# ==========================================================
# Applies a staged host-config restore at boot: files queued under
# restore-pending/current are copied onto the root filesystem, while
# cluster (pmxcfs) data is diverted for manual recovery.
# All paths are overridable through PMX_RESTORE_* environment variables.
PENDING_BASE="${PMX_RESTORE_PENDING_BASE:-/var/lib/proxmenux/restore-pending}"
CURRENT_LINK="${PENDING_BASE}/current"
LOG_DIR="${PMX_RESTORE_LOG_DIR:-/var/log/proxmenux}"
DEST_PREFIX="${PMX_RESTORE_DEST_PREFIX:-/}"
PRE_BACKUP_BASE="${PMX_RESTORE_PRE_BACKUP_BASE:-/root/proxmenux-pre-restore}"
RECOVERY_BASE="${PMX_RESTORE_RECOVERY_BASE:-/root/proxmenux-recovery}"
mkdir -p "$LOG_DIR" "$PENDING_BASE/completed" >/dev/null 2>&1 || true
# All output below goes into a timestamped log file.
LOG_FILE="${LOG_DIR}/proxmenux-restore-onboot-$(date +%Y%m%d_%H%M%S).log"
exec >>"$LOG_FILE" 2>&1
echo "=== ProxMenux pending restore started at $(date -Iseconds) ==="
# No "current" symlink means nothing was staged; exit quietly.
if [[ ! -e "$CURRENT_LINK" ]]; then
echo "No pending restore link found. Nothing to do."
exit 0
fi
PENDING_DIR="$(readlink -f "$CURRENT_LINK" 2>/dev/null || echo "$CURRENT_LINK")"
# A dangling link is cleaned up and treated as "nothing to do".
if [[ ! -d "$PENDING_DIR" ]]; then
echo "Pending restore directory not found: $PENDING_DIR"
rm -f "$CURRENT_LINK" >/dev/null 2>&1 || true
exit 0
fi
APPLY_LIST="${PENDING_DIR}/apply-on-boot.list"
PLAN_ENV="${PENDING_DIR}/plan.env"
STATE_FILE="${PENDING_DIR}/state"
# plan.env carries per-restore options (e.g. HB_RESTORE_INCLUDE_ZFS).
if [[ -f "$PLAN_ENV" ]]; then
# shellcheck source=/dev/null
source "$PLAN_ENV"
fi
: "${HB_RESTORE_INCLUDE_ZFS:=0}"
# Without the apply list there is nothing safe to do — mark as failed.
if [[ ! -f "$APPLY_LIST" ]]; then
echo "Apply list missing: $APPLY_LIST"
echo "failed" >"$STATE_FILE"
exit 1
fi
echo "Pending dir: $PENDING_DIR"
echo "Apply list: $APPLY_LIST"
echo "Include ZFS: $HB_RESTORE_INCLUDE_ZFS"
echo "running" >"$STATE_FILE"
# Every file about to be overwritten is saved under this backup root first.
backup_root="${PRE_BACKUP_BASE}/$(date +%Y%m%d_%H%M%S)-onboot"
mkdir -p "$backup_root" >/dev/null 2>&1 || true
cluster_recovery_root=""
applied=0
skipped=0
failed=0
# Copy each staged path (relative to <pending>/rootfs) onto DEST_PREFIX,
# counting applied/skipped/failed entries.
while IFS= read -r rel; do
[[ -z "$rel" ]] && continue
src="${PENDING_DIR}/rootfs/${rel}"
dst="${DEST_PREFIX%/}/${rel}"
if [[ ! -e "$src" ]]; then
((skipped++))
continue
fi
# Never restore cluster virtual filesystem data live.
if [[ "$rel" == etc/pve* ]] || [[ "$rel" == var/lib/pve-cluster* ]]; then
# Divert pmxcfs-backed paths into a recovery dir for a manual, offline apply.
if [[ -z "$cluster_recovery_root" ]]; then
cluster_recovery_root="${RECOVERY_BASE}/$(date +%Y%m%d_%H%M%S)-onboot"
mkdir -p "$cluster_recovery_root" >/dev/null 2>&1 || true
fi
mkdir -p "$cluster_recovery_root/$(dirname "$rel")" >/dev/null 2>&1 || true
cp -a "$src" "$cluster_recovery_root/$rel" >/dev/null 2>&1 || true
((skipped++))
continue
fi
# /etc/zfs is opt-in.
if [[ "$rel" == etc/zfs || "$rel" == etc/zfs/* ]]; then
if [[ "$HB_RESTORE_INCLUDE_ZFS" != "1" ]]; then
((skipped++))
continue
fi
fi
# Preserve the current version before overwriting it.
if [[ -e "$dst" ]]; then
mkdir -p "$backup_root/$(dirname "$rel")" >/dev/null 2>&1 || true
cp -a "$dst" "$backup_root/$rel" >/dev/null 2>&1 || true
fi
# Directories are synced (with deletions); single files are copied.
if [[ -d "$src" ]]; then
mkdir -p "$dst" >/dev/null 2>&1 || true
if rsync -aAXH --delete "$src/" "$dst/" >/dev/null 2>&1; then
((applied++))
else
((failed++))
fi
else
mkdir -p "$(dirname "$dst")" >/dev/null 2>&1 || true
if cp -a "$src" "$dst" >/dev/null 2>&1; then
((applied++))
else
((failed++))
fi
fi
done <"$APPLY_LIST"
# Restored files may affect units, initramfs and grub — refresh them all
# (best-effort; each tool may be absent).
systemctl daemon-reload >/dev/null 2>&1 || true
command -v update-initramfs >/dev/null 2>&1 && update-initramfs -u -k all >/dev/null 2>&1 || true
command -v update-grub >/dev/null 2>&1 && update-grub >/dev/null 2>&1 || true
echo "Applied: $applied"
echo "Skipped: $skipped"
echo "Failed: $failed"
echo "Backup before restore: $backup_root"
# If cluster paths were diverted, generate an interactive helper script
# the admin can run later in a maintenance window.
if [[ -n "$cluster_recovery_root" ]]; then
helper="${cluster_recovery_root}/apply-cluster-restore.sh"
cat > "$helper" <<EOF
#!/bin/bash
set -euo pipefail
RECOVERY_ROOT="${cluster_recovery_root}"
echo "Cluster recovery helper"
echo "Source: \$RECOVERY_ROOT"
echo
echo "WARNING: run this only in a maintenance window."
echo
read -r -p "Type YES to continue: " ans
[[ "\$ans" == "YES" ]] || { echo "Aborted."; exit 1; }
systemctl stop pve-cluster || true
[[ -d "\$RECOVERY_ROOT/etc/pve" ]] && mkdir -p /etc/pve && cp -a "\$RECOVERY_ROOT/etc/pve/." /etc/pve/ || true
[[ -d "\$RECOVERY_ROOT/var/lib/pve-cluster" ]] && mkdir -p /var/lib/pve-cluster && cp -a "\$RECOVERY_ROOT/var/lib/pve-cluster/." /var/lib/pve-cluster/ || true
systemctl start pve-cluster || true
echo "Cluster recovery finished."
EOF
chmod +x "$helper" >/dev/null 2>&1 || true
echo "Cluster paths extracted to: $cluster_recovery_root"
echo "Cluster recovery helper: $helper"
fi
# Record the final state, archive the pending dir and disarm the boot unit
# so the restore runs exactly once.
if [[ "$failed" -eq 0 ]]; then
echo "completed" >"$STATE_FILE"
else
echo "completed_with_errors" >"$STATE_FILE"
fi
restore_id="$(basename "$PENDING_DIR")"
mv "$PENDING_DIR" "${PENDING_BASE}/completed/${restore_id}" >/dev/null 2>&1 || true
rm -f "$CURRENT_LINK" >/dev/null 2>&1 || true
systemctl disable proxmenux-restore-onboot.service >/dev/null 2>&1 || true
echo "=== ProxMenux pending restore finished at $(date -Iseconds) ==="
echo "Log file: $LOG_FILE"
exit 0
File diff suppressed because it is too large Load Diff
+387
View File
@@ -0,0 +1,387 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Scheduled Backup Jobs
# ==========================================================
# Interactive manager for host-config backup jobs driven by systemd timers.
# Resolve the scripts directory: prefer a sibling checkout, then the
# system-wide install under /usr/local/share/proxmenux.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)"
LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts"
LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then
LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL"
UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
elif [[ ! -f "$UTILS_FILE" ]]; then
UTILS_FILE="$BASE_DIR/utils.sh"
fi
# utils.sh provides msg_*, translate, logo and cache helpers used below.
if [[ -f "$UTILS_FILE" ]]; then
# shellcheck source=/dev/null
source "$UTILS_FILE"
else
echo "ERROR: utils.sh not found." >&2
exit 1
fi
# Shared backup library (hb_* helpers and HB_UI_* dialog constants).
LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh"
[[ ! -f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh"
if [[ -f "$LIB_FILE" ]]; then
# shellcheck source=/dev/null
source "$LIB_FILE"
else
msg_error "$(translate "Cannot load backup library: lib_host_backup_common.sh")"
exit 1
fi
load_language
initialize_cache
# Job definitions (<id>.env + <id>.paths) and per-job run logs.
JOBS_DIR="/var/lib/proxmenux/backup-jobs"
LOG_DIR="/var/log/proxmenux/backup-jobs"
mkdir -p "$JOBS_DIR" "$LOG_DIR" >/dev/null 2>&1 || true
# Path helpers: map a job id to its definition file, path-list file and
# the systemd service/timer unit files.
_job_file() {
    printf '%s/%s.env\n' "$JOBS_DIR" "$1"
}
_job_paths_file() {
    printf '%s/%s.paths\n' "$JOBS_DIR" "$1"
}
_service_file() {
    printf '/etc/systemd/system/proxmenux-backup-%s.service\n' "$1"
}
_timer_file() {
    printf '/etc/systemd/system/proxmenux-backup-%s.timer\n' "$1"
}
# Echo $1 when it is a non-negative integer; otherwise echo 0
# (empty/missing input also yields 0).
_normalize_uint() {
    local value="${1:-0}"
    if ! [[ "$value" =~ ^[0-9]+$ ]]; then
        value=0
    fi
    echo "$value"
}
# Serialize KEY=VALUE pairs ($2..) into the job env file ($1), shell-quoting
# each value with %q so it can be safely sourced later.
_write_job_env() {
    local file="$1"
    shift
    local pair
    {
        echo "# ProxMenux scheduled backup job"
        for pair in "$@"; do
            printf '%s=%q\n' "${pair%%=*}" "${pair#*=}"
        done
    } > "$file"
}
# Print every configured job id (basename of each *.env), one per line,
# sorted alphabetically. Prints nothing when no jobs exist.
_list_jobs() {
    local env_file
    for env_file in "$JOBS_DIR"/*.env; do
        [[ -f "$env_file" ]] || continue
        basename "$env_file" .env
    done | sort
}
# Report "<timer-state>/<service-state>" for a job, e.g. "enabled/inactive".
_show_job_status() {
    local id="$1"
    local timer_state="disabled"
    local service_state="unknown"
    systemctl is-enabled --quiet "proxmenux-backup-${id}.timer" >/dev/null 2>&1 && timer_state="enabled"
    # Fix: `systemctl is-active` prints the state AND exits non-zero for
    # inactive units, so the old `$(... || echo inactive)` captured the word
    # twice ("inactive<newline>inactive"). Capture first, then default.
    service_state=$(systemctl is-active "proxmenux-backup-${id}.service" 2>/dev/null)
    [[ -z "$service_state" ]] && service_state="inactive"
    echo "${timer_state}/${service_state}"
}
# Generate the systemd .service/.timer unit pair for job $1, scheduled by
# the OnCalendar expression $2, then reload systemd so the units are known.
_write_job_units() {
local id="$1"
local on_calendar="$2"
# Prefer the installed runner; fall back to the copy beside this script.
local runner="$LOCAL_SCRIPTS/backup_restore/run_scheduled_backup.sh"
[[ ! -f "$runner" ]] && runner="$SCRIPT_DIR/run_scheduled_backup.sh"
cat > "$(_service_file "$id")" <<EOF
[Unit]
Description=ProxMenux Scheduled Backup Job (${id})
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=${runner} ${id}
Nice=10
IOSchedulingClass=best-effort
IOSchedulingPriority=7
EOF
cat > "$(_timer_file "$id")" <<EOF
[Unit]
Description=ProxMenux Scheduled Backup Timer (${id})
[Timer]
OnCalendar=${on_calendar}
Persistent=true
RandomizedDelaySec=120
Unit=proxmenux-backup-${id}.service
[Install]
WantedBy=timers.target
EOF
systemctl daemon-reload >/dev/null 2>&1 || true
}
# Prompt for retention counts (keep-last/hourly/daily/weekly/monthly/yearly)
# and return them through the nameref $1 as KEY=value strings.
# Returns 1 if the user cancels any dialog; non-numeric input becomes 0.
_prompt_retention() {
local __out_var="$1"
local last hourly daily weekly monthly yearly
last=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
--inputbox "$(translate "keep-last (0 disables)")" 9 60 "7" 3>&1 1>&2 2>&3) || return 1
hourly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
--inputbox "$(translate "keep-hourly (0 disables)")" 9 60 "0" 3>&1 1>&2 2>&3) || return 1
daily=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
--inputbox "$(translate "keep-daily (0 disables)")" 9 60 "7" 3>&1 1>&2 2>&3) || return 1
weekly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
--inputbox "$(translate "keep-weekly (0 disables)")" 9 60 "4" 3>&1 1>&2 2>&3) || return 1
monthly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
--inputbox "$(translate "keep-monthly (0 disables)")" 9 60 "3" 3>&1 1>&2 2>&3) || return 1
yearly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
--inputbox "$(translate "keep-yearly (0 disables)")" 9 60 "0" 3>&1 1>&2 2>&3) || return 1
# Sanitize every answer to a non-negative integer.
last=$(_normalize_uint "$last")
hourly=$(_normalize_uint "$hourly")
daily=$(_normalize_uint "$daily")
weekly=$(_normalize_uint "$weekly")
monthly=$(_normalize_uint "$monthly")
yearly=$(_normalize_uint "$yearly")
# Write the result array into the caller's variable via nameref.
local -n out="$__out_var"
out=(
"KEEP_LAST=$last"
"KEEP_HOURLY=$hourly"
"KEEP_DAILY=$daily"
"KEEP_WEEKLY=$weekly"
"KEEP_MONTHLY=$monthly"
"KEEP_YEARLY=$yearly"
)
}
# Interactive wizard: create a scheduled backup job. Collects id, backend
# (local/borg/pbs), OnCalendar schedule, path profile and retention, then
# persists <id>.env + <id>.paths, writes the systemd units and enables the
# timer. Returns 1 on any cancelled dialog.
_create_job() {
local id backend on_calendar profile_mode
id=$(dialog --backtitle "ProxMenux" --title "$(translate "New backup job")" \
--inputbox "$(translate "Job ID (letters, numbers, - _)")" 9 68 "hostcfg-daily" 3>&1 1>&2 2>&3) || return 1
[[ -z "$id" ]] && return 1
# Sanitize: squeeze anything non [alnum_-] to '-' and trim edge dashes.
id=$(echo "$id" | tr -cs '[:alnum:]_-' '-' | sed 's/^-*//; s/-*$//')
[[ -z "$id" ]] && return 1
[[ -f "$(_job_file "$id")" ]] && {
dialog --backtitle "ProxMenux" --title "$(translate "Error")" \
--msgbox "$(translate "A job with this ID already exists.")" 8 62
return 1
}
backend=$(dialog --backtitle "ProxMenux" --title "$(translate "Backend")" \
--menu "\n$(translate "Select backup backend:")" 14 70 6 \
"local" "Local archive" \
"borg" "Borg repository" \
"pbs" "Proxmox Backup Server" \
3>&1 1>&2 2>&3) || return 1
on_calendar=$(dialog --backtitle "ProxMenux" --title "$(translate "Schedule")" \
--inputbox "$(translate "systemd OnCalendar expression")"$'\n'"$(translate "Example: daily or Mon..Fri 03:00")" \
11 72 "daily" 3>&1 1>&2 2>&3) || return 1
[[ -z "$on_calendar" ]] && return 1
profile_mode=$(dialog --backtitle "ProxMenux" --title "$(translate "Profile")" \
--menu "\n$(translate "Select backup profile:")" 12 68 4 \
"default" "Default critical paths" \
"custom" "Custom selected paths" \
3>&1 1>&2 2>&3) || return 1
# hb_select_profile_paths fills `paths` (nameref) with the paths to back up.
local -a paths=()
hb_select_profile_paths "$profile_mode" paths || return 1
local -a retention=()
_prompt_retention retention || return 1
# Common job settings; backend-specific keys are appended below.
local -a lines=(
"JOB_ID=$id"
"BACKEND=$backend"
"ON_CALENDAR=$on_calendar"
"PROFILE_MODE=$profile_mode"
"ENABLED=1"
)
lines+=("${retention[@]}")
case "$backend" in
local)
local dest_dir ext
dest_dir=$(hb_prompt_dest_dir) || return 1
ext=$(dialog --backtitle "ProxMenux" --title "$(translate "Archive format")" \
--menu "\n$(translate "Select local archive format:")" 12 62 4 \
"tar.zst" "tar + zstd (preferred)" \
"tar.gz" "tar + gzip" \
3>&1 1>&2 2>&3) || return 1
lines+=("LOCAL_DEST_DIR=$dest_dir" "LOCAL_ARCHIVE_EXT=$ext")
;;
borg)
local repo passphrase
hb_select_borg_repo repo || return 1
hb_prepare_borg_passphrase || return 1
passphrase="${BORG_PASSPHRASE:-}"
# NOTE(review): the passphrase is persisted in plaintext inside the
# job .env — confirm file permissions restrict it to root.
lines+=(
"BORG_REPO=$repo"
"BORG_PASSPHRASE=$passphrase"
"BORG_ENCRYPT_MODE=${BORG_ENCRYPT_MODE:-none}"
)
;;
pbs)
hb_select_pbs_repository || return 1
hb_ask_pbs_encryption
local bid
bid="hostcfg-$(hostname)"
bid=$(dialog --backtitle "ProxMenux" --title "PBS" \
--inputbox "$(translate "Backup ID for this job:")" \
"$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$bid" 3>&1 1>&2 2>&3) || return 1
bid=$(echo "$bid" | tr -cs '[:alnum:]_-' '-' | sed 's/-*$//')
lines+=(
"PBS_REPOSITORY=${HB_PBS_REPOSITORY}"
"PBS_PASSWORD=${HB_PBS_SECRET}"
"PBS_BACKUP_ID=${bid}"
"PBS_KEYFILE=${HB_PBS_KEYFILE:-}"
"PBS_ENCRYPTION_PASSWORD=${HB_PBS_ENC_PASS:-}"
)
;;
esac
# Persist the job definition and its path list, then arm the timer.
_write_job_env "$(_job_file "$id")" "${lines[@]}"
: > "$(_job_paths_file "$id")"
local p
for p in "${paths[@]}"; do
echo "$p" >> "$(_job_paths_file "$id")"
done
_write_job_units "$id" "$on_calendar"
systemctl enable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true
# Confirmation summary.
show_proxmenux_logo
msg_title "$(translate "Scheduled backup job created")"
echo -e ""
echo -e "${TAB}${BGN}$(translate "Job ID:")${CL} ${BL}${id}${CL}"
echo -e "${TAB}${BGN}$(translate "Backend:")${CL} ${BL}${backend}${CL}"
echo -e "${TAB}${BGN}$(translate "Schedule:")${CL} ${BL}${on_calendar}${CL}"
echo -e "${TAB}${BGN}$(translate "Status:")${CL} ${BL}$(_show_job_status "$id")${CL}"
echo -e ""
msg_success "$(translate "Press Enter to continue...")"
read -r
return 0
}
# Show a numbered menu of all jobs (with their status) and return the
# chosen id via the nameref variable named in $2. $1 is the dialog title.
# Returns 1 when there are no jobs or the user cancels.
_pick_job() {
local title="$1"
local __out_var="$2"
local -a ids=()
mapfile -t ids < <(_list_jobs)
if [[ ${#ids[@]} -eq 0 ]]; then
dialog --backtitle "ProxMenux" --title "$(translate "No jobs")" \
--msgbox "$(translate "No scheduled backup jobs found.")" 8 62
return 1
fi
# Menu tags are 1-based indexes into ids[].
local -a menu=()
local i=1 id
for id in "${ids[@]}"; do
menu+=("$i" "$id [$(_show_job_status "$id")]")
((i++))
done
local sel
sel=$(dialog --backtitle "ProxMenux" --title "$title" \
--menu "\n$(translate "Select a job:")" "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
"${menu[@]}" 3>&1 1>&2 2>&3) || return 1
local picked="${ids[$((sel-1))]}"
# Write the selection into the caller's variable.
local -n out="$__out_var"
out="$picked"
return 0
}
# Let the user pick a job and execute its runner synchronously.
_job_run_now() {
    local job_id=""
    _pick_job "$(translate "Run job now")" job_id || return 1
    # Prefer the installed runner; fall back to the copy beside this script.
    local runner="$LOCAL_SCRIPTS/backup_restore/run_scheduled_backup.sh"
    [[ -f "$runner" ]] || runner="$SCRIPT_DIR/run_scheduled_backup.sh"
    if "$runner" "$job_id"; then
        msg_ok "$(translate "Job executed successfully.")"
    else
        msg_warn "$(translate "Job execution finished with errors. Check logs.")"
    fi
    msg_success "$(translate "Press Enter to continue...")"
    read -r
}
# Flip the enabled/disabled state of the selected job's systemd timer.
_job_toggle() {
    local job_id=""
    _pick_job "$(translate "Enable/Disable job")" job_id || return 1
    local timer_unit="proxmenux-backup-${job_id}.timer"
    if systemctl is-enabled --quiet "$timer_unit" >/dev/null 2>&1; then
        systemctl disable --now "$timer_unit" >/dev/null 2>&1 || true
        msg_warn "$(translate "Job timer disabled:") $job_id"
    else
        systemctl enable --now "$timer_unit" >/dev/null 2>&1 || true
        msg_ok "$(translate "Job timer enabled:") $job_id"
    fi
    msg_success "$(translate "Press Enter to continue...")"
    read -r
}
# Remove a job definition together with its systemd units (after a
# whiptail confirmation).
_job_delete() {
    local job_id=""
    _pick_job "$(translate "Delete job")" job_id || return 1
    whiptail --title "$(translate "Confirm delete")" \
        --yesno "$(translate "Delete scheduled backup job?")"$'\n\n'"ID: ${job_id}" 10 66 || return 1
    systemctl disable --now "proxmenux-backup-${job_id}.timer" >/dev/null 2>&1 || true
    rm -f "$(_service_file "$job_id")" "$(_timer_file "$job_id")" "$(_job_file "$job_id")" "$(_job_paths_file "$job_id")"
    systemctl daemon-reload >/dev/null 2>&1 || true
    msg_ok "$(translate "Job deleted:") $job_id"
    msg_success "$(translate "Press Enter to continue...")"
    read -r
}
# Render every job plus its last-run status file into a dialog textbox.
_show_jobs() {
    local report
    report=$(mktemp) || return
    {
        echo "=== $(translate "Scheduled backup jobs") ==="
        echo ""
        local job_id
        while IFS= read -r job_id; do
            [[ -n "$job_id" ]] || continue
            echo "$job_id [$(_show_job_status "$job_id")]"
            # Append the indented last-run status when one was recorded.
            if [[ -f "${LOG_DIR}/${job_id}-last.status" ]]; then
                sed 's/^/ /' "${LOG_DIR}/${job_id}-last.status"
            fi
            echo ""
        done < <(_list_jobs)
    } > "$report"
    dialog --backtitle "ProxMenux" --title "$(translate "Scheduled backup jobs")" \
        --textbox "$report" 28 100 || true
    rm -f "$report"
}
# Main loop: render the scheduler menu and dispatch to the job actions
# until the user chooses Return or cancels the dialog.
main_menu() {
while true; do
local choice
choice=$(dialog --backtitle "ProxMenux" \
--title "$(translate "Backup scheduler and retention")" \
--menu "\n$(translate "Choose action:")" "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
1 "$(translate "Create scheduled backup job")" \
2 "$(translate "Show jobs and last run status")" \
3 "$(translate "Run a job now")" \
4 "$(translate "Enable / disable job timer")" \
5 "$(translate "Delete job")" \
0 "$(translate "Return")" \
3>&1 1>&2 2>&3) || return 0
case "$choice" in
1) _create_job ;;
2) _show_jobs ;;
3) _job_run_now ;;
4) _job_toggle ;;
5) _job_delete ;;
0) return 0 ;;
esac
done
}
main_menu
@@ -0,0 +1,770 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Host Config Backup/Restore - Shared Library
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : MIT
# Version : 1.0
# Last Updated: 08/04/2026
# ==========================================================
# Do not execute directly — source from backup_host.sh
# Library guard
# Refuse direct execution: this file only defines functions/constants and
# must be sourced by backup_host.sh (BASH_SOURCE[0] == $0 means "run, not sourced").
[[ "${BASH_SOURCE[0]}" == "$0" ]] && {
    echo "This file is a library. Source it, do not run it directly." >&2; exit 1
}
# Persistent state directory shared by all ProxMenux backup tooling.
HB_STATE_DIR="/usr/local/share/proxmenux"
# Pinned Borg release used when no system borg is on PATH, together with
# the expected SHA-256 of the upstream borg-linux64 binary for verification.
HB_BORG_VERSION="1.2.8"
HB_BORG_LINUX64_SHA256="cfa50fb704a93d3a4fa258120966345fddb394f960dca7c47fcb774d0172f40b"
HB_BORG_LINUX64_URL="https://github.com/borgbackup/borg/releases/download/${HB_BORG_VERSION}/borg-linux64"
# Translation wrapper — safe fallback if translate not yet loaded
# Translation wrapper: use the project's translate() when it is loaded,
# otherwise echo the untranslated string.
#
# Fix: the previous one-liner (`declare -f ... && translate "$1" || echo "$1"`)
# also executed the echo fallback whenever translate() existed but exited
# non-zero, printing the text twice.  An explicit if/else only falls back
# when translate() is genuinely absent.
hb_translate() {
    if declare -f translate >/dev/null 2>&1; then
        translate "$1"
    else
        echo "$1"
    fi
}
# ==========================================================
# UI SIZE CONSTANTS
# ==========================================================
# Shared dialog geometry (height / width / visible list rows) so that
# every menu, input box, password box and yes/no prompt looks consistent.
HB_UI_MENU_H=22
HB_UI_MENU_W=84
HB_UI_MENU_LIST=10
HB_UI_INPUT_H=10
HB_UI_INPUT_W=72
HB_UI_PASS_H=10
HB_UI_PASS_W=72
HB_UI_YESNO_H=10
HB_UI_YESNO_W=78
# ==========================================================
# DEFAULT PROFILE PATHS
# ==========================================================
# Emit the default set of host paths included in a configuration backup,
# one path per line.  /etc/zfs is appended only when ZFS appears to be in
# use on this host (config dir present, or the zpool tool installed).
hb_default_profile_paths() {
    local -a profile=(
        "/etc/pve"
        "/etc/network"
        "/etc/hosts"
        "/etc/hostname"
        "/etc/ssh"
        "/etc/systemd/system"
        "/etc/modules"
        "/etc/modules-load.d"
        "/etc/modprobe.d"
        "/etc/udev/rules.d"
        "/etc/default/grub"
        "/etc/fstab"
        "/etc/kernel"
        "/etc/apt"
        "/etc/vzdump.conf"
        "/etc/postfix"
        "/etc/resolv.conf"
        "/etc/timezone"
        "/etc/iscsi"
        "/etc/multipath"
        "/usr/local/bin"
        "/usr/local/share/proxmenux"
        "/root"
        "/etc/cron.d"
        "/etc/cron.daily"
        "/etc/cron.hourly"
        "/etc/cron.weekly"
        "/etc/cron.monthly"
        "/etc/cron.allow"
        "/etc/cron.deny"
        "/var/spool/cron/crontabs"
        "/var/lib/pve-cluster"
    )
    # ZFS configuration is only relevant on hosts that actually use ZFS.
    if [[ -d /etc/zfs ]] || command -v zpool >/dev/null 2>&1; then
        profile+=("/etc/zfs")
    fi
    local entry
    for entry in "${profile[@]}"; do
        printf '%s\n' "$entry"
    done
}
# ==========================================================
# PATH CLASSIFICATION (restore safety)
# Returns: dangerous | reboot | hot
# ==========================================================
# Classify a backup path (relative, no leading /) by restore risk:
#   dangerous - must never be restored onto a live node (cluster fs, network)
#   reboot    - safe to write, but only takes effect after a reboot
#   hot       - can be applied on a running system
hb_classify_path() {
    local rel="$1"
    # Cluster filesystem, cluster DB and live network config: never hot-restore.
    case "$rel" in
        etc/pve|etc/pve/*|var/lib/pve-cluster|var/lib/pve-cluster/*|etc/network|etc/network/*)
            echo "dangerous"
            return
            ;;
    esac
    # Kernel/boot/storage plumbing: restorable, but needs a reboot.
    case "$rel" in
        etc/modules|etc/modules/*|\
        etc/modules-load.d|etc/modules-load.d/*|\
        etc/modprobe.d|etc/modprobe.d/*|\
        etc/udev/rules.d|etc/udev/rules.d/*|\
        etc/default/grub|\
        etc/fstab|\
        etc/kernel|etc/kernel/*|\
        etc/iscsi|etc/iscsi/*|\
        etc/multipath|etc/multipath/*|\
        etc/zfs|etc/zfs/*)
            echo "reboot"
            return
            ;;
    esac
    # Everything else is safe to apply live.
    echo "hot"
}
# Print a human-readable warning for "dangerous" paths whose live restore
# can break the node; prints nothing for paths with no specific warning.
hb_path_warning() {
    local rel="$1"
    case "$rel" in
        etc/pve|etc/pve/*)
            hb_translate "/etc/pve is managed by pmxcfs (cluster filesystem). Applying this on a running node can corrupt cluster state. Use 'Export to file' and apply it manually during a maintenance window." ;;
        var/lib/pve-cluster|var/lib/pve-cluster/*)
            hb_translate "/var/lib/pve-cluster is live cluster data. Never restore this while the node is running. Use 'Export to file' for manual recovery only." ;;
        etc/network|etc/network/*)
            hb_translate "/etc/network controls active interfaces. Applying may immediately change or drop network connectivity, including active SSH sessions." ;;
    esac
}
# ==========================================================
# PROFILE PATH SELECTION
# ==========================================================
# Resolve the list of paths for a backup profile.
#   $1 = "default" for the full default profile, anything else for an
#        interactive checklist of the defaults.
#   $2 = name of the caller's array variable (filled via nameref).
# Returns 1 on dialog cancel or empty selection.
hb_select_profile_paths() {
    local mode="$1"
    local __out_var="$2"
    local -n __out_ref="$__out_var"
    # Fix: declare the scratch array local — previously mapfile created a
    # global __defaults that leaked into the caller's namespace.
    local -a __defaults=()
    mapfile -t __defaults < <(hb_default_profile_paths)
    if [[ "$mode" == "default" ]]; then
        __out_ref=("${__defaults[@]}")
        return 0
    fi
    # Build a numbered checklist (all entries initially off).
    local options=() idx=1 path
    for path in "${__defaults[@]}"; do
        options+=("$idx" "$path" "off")
        ((idx++))
    done
    local selected
    selected=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Custom backup profile")" \
        --separate-output --checklist \
        "$(hb_translate "Select paths to include:")" \
        26 86 18 "${options[@]}" 3>&1 1>&2 2>&3) || return 1
    __out_ref=()
    local choice
    # --separate-output yields one selected index per line; map back to paths.
    while read -r choice; do
        [[ -z "$choice" ]] && continue
        __out_ref+=("${__defaults[$((choice-1))]}")
    done <<< "$selected"
    if [[ ${#__out_ref[@]} -eq 0 ]]; then
        dialog --backtitle "ProxMenux" --title "$(hb_translate "Error")" \
            --msgbox "$(hb_translate "No paths selected. Select at least one path.")" 8 60
        return 1
    fi
}
# ==========================================================
# STAGING OPERATIONS
# ==========================================================
# Build a staging tree for a backup run.
#   $1  = staging root (recreated from scratch)
#   $@  = absolute host paths to include
# Layout produced:
#   <root>/rootfs/...   copies of the selected paths (relative layout)
#   <root>/metadata/... selected/missing path lists, system snapshot,
#                       manifest and per-file SHA-256 checksums
hb_prepare_staging() {
    local staging_root="$1"; shift
    local paths=("$@")
    rm -rf "$staging_root"
    mkdir -p "$staging_root/rootfs" "$staging_root/metadata"
    local selected_file="$staging_root/metadata/selected_paths.txt"
    local missing_file="$staging_root/metadata/missing_paths.txt"
    : > "$selected_file"
    : > "$missing_file"
    local p rel target
    for p in "${paths[@]}"; do
        rel="${p#/}"
        # Record every requested path; note the ones that do not exist.
        echo "$rel" >> "$selected_file"
        [[ -e "$p" ]] || { echo "$p" >> "$missing_file"; continue; }
        target="$staging_root/rootfs/$rel"
        if [[ -d "$p" ]]; then
            mkdir -p "$target"
            # -aAXH: archive mode + ACLs, xattrs and hardlinks; numeric ids
            # so the archive restores cleanly on a host with different users.
            local -a rsync_opts=(
                -aAXH --numeric-ids
                --exclude "images/"
                --exclude "dump/"
                --exclude "tmp/"
                --exclude "*.log"
            )
            # /root is included by default for easier recovery, but avoid volatile/sensitive noise.
            if [[ "$rel" == "root" || "$rel" == "root/"* ]]; then
                rsync_opts+=(
                    --exclude ".bash_history"
                    --exclude ".cache/"
                    --exclude "tmp/"
                    --exclude ".local/share/Trash/"
                )
            fi
            # Runtime pending-restore data belongs in /var/lib/proxmenux, never in app code tree.
            if [[ "$rel" == "usr/local/share/proxmenux" || "$rel" == "usr/local/share/proxmenux/"* ]]; then
                rsync_opts+=(
                    --exclude "restore-pending/"
                )
            fi
            # Best effort: unreadable files must not abort the whole backup.
            rsync "${rsync_opts[@]}" "$p/" "$target/" 2>/dev/null || true
        else
            mkdir -p "$(dirname "$target")"
            cp -a "$p" "$target" 2>/dev/null || true
        fi
    done
    # Metadata snapshot
    local meta="$staging_root/metadata"
    {
        echo "generated_at=$(date -Iseconds)"
        echo "hostname=$(hostname)"
        echo "kernel=$(uname -r)"
    } > "$meta/run_info.env"
    # Optional environment snapshots — each tool may be absent on non-PVE hosts.
    command -v pveversion >/dev/null 2>&1 && pveversion -v > "$meta/pveversion.txt" 2>&1 || true
    command -v lsblk >/dev/null 2>&1 && lsblk -f > "$meta/lsblk.txt" 2>&1 || true
    command -v qm >/dev/null 2>&1 && qm list > "$meta/qm-list.txt" 2>&1 || true
    command -v pct >/dev/null 2>&1 && pct list > "$meta/pct-list.txt" 2>&1 || true
    command -v zpool >/dev/null 2>&1 && zpool status > "$meta/zpool.txt" 2>&1 || true
    # Manifest + checksums
    (
        cd "$staging_root/rootfs" || return 1
        find . -mindepth 1 -print | sort > "$meta/manifest.txt"
        find . -type f -print0 | sort -z | xargs -0 sha256sum 2>/dev/null \
            > "$meta/checksums.sha256" || true
    )
}
# Load the list of paths contained in an unpacked backup.
#   $1 = restore root (contains rootfs/ and metadata/)
#   $2 = name of the caller's array variable (filled via nameref)
# Primary source is metadata/selected_paths.txt; when that is absent or
# empty, fall back to probing rootfs/ for the default profile paths.
hb_load_restore_paths() {
    local restore_root="$1"
    local __out_var="$2"
    local -n __out="$__out_var"
    __out=()
    local manifest="$restore_root/metadata/selected_paths.txt"
    if [[ -f "$manifest" ]]; then
        local entry
        while IFS= read -r entry; do
            [[ -n "$entry" ]] && __out+=("$entry")
        done < "$manifest"
    fi
    # Fallback: keep only default-profile paths actually present in rootfs.
    if [[ ${#__out[@]} -eq 0 ]]; then
        local candidate
        while IFS= read -r candidate; do
            [[ -n "$candidate" && -e "$restore_root/rootfs/${candidate#/}" ]] && __out+=("${candidate#/}")
        done < <(hb_default_profile_paths)
    fi
}
# ==========================================================
# PBS CONFIG — auto-detect from storage.cfg + manual
# ==========================================================
# Collect all known PBS repository configs into four parallel arrays:
#   HB_PBS_NAMES / HB_PBS_REPOS / HB_PBS_SECRETS / HB_PBS_SOURCES
# Sources: (1) "pbs:" stanzas in /etc/pve/storage.cfg, with the stored
# password read from /etc/pve/priv/storage/<name>.pw when present, and
# (2) manually added configs persisted under $HB_STATE_DIR.
hb_collect_pbs_configs() {
    HB_PBS_NAMES=()
    HB_PBS_REPOS=()
    HB_PBS_SECRETS=()
    HB_PBS_SOURCES=()
    if [[ -f /etc/pve/storage.cfg ]]; then
        local current="" server="" datastore="" username="" pw_file pw_val
        while IFS= read -r line; do
            # Strip comments and surrounding whitespace; skip blank lines.
            line="${line%%#*}"
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" ]] && continue
            if [[ $line =~ ^pbs:[[:space:]]*(.+)$ ]]; then
                # A new "pbs:" header closes the previous stanza: flush it
                # if all required fields were collected.
                if [[ -n "$current" && -n "$server" && -n "$datastore" && -n "$username" ]]; then
                    pw_file="/etc/pve/priv/storage/${current}.pw"
                    pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")"
                    HB_PBS_NAMES+=("$current")
                    HB_PBS_REPOS+=("${username}@${server}:${datastore}")
                    HB_PBS_SECRETS+=("$pw_val")
                    HB_PBS_SOURCES+=("proxmox")
                fi
                current="${BASH_REMATCH[1]}"; server="" datastore="" username=""
            elif [[ -n "$current" ]]; then
                # Property lines of the current pbs stanza.
                [[ $line =~ ^[[:space:]]+server[[:space:]]+(.+)$ ]] && server="${BASH_REMATCH[1]}"
                [[ $line =~ ^[[:space:]]+datastore[[:space:]]+(.+)$ ]] && datastore="${BASH_REMATCH[1]}"
                [[ $line =~ ^[[:space:]]+username[[:space:]]+(.+)$ ]] && username="${BASH_REMATCH[1]}"
                # A different storage-type header ("dir:", "nfs:", ...) also
                # terminates the pbs stanza — flush if complete.
                if [[ $line =~ ^[a-zA-Z]+:[[:space:]] &&
                    -n "$server" && -n "$datastore" && -n "$username" ]]; then
                    pw_file="/etc/pve/priv/storage/${current}.pw"
                    pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")"
                    HB_PBS_NAMES+=("$current")
                    HB_PBS_REPOS+=("${username}@${server}:${datastore}")
                    HB_PBS_SECRETS+=("$pw_val")
                    HB_PBS_SOURCES+=("proxmox")
                    current="" server="" datastore="" username=""
                fi
            fi
        done < /etc/pve/storage.cfg
        # Last stanza
        if [[ -n "$current" && -n "$server" && -n "$datastore" && -n "$username" ]]; then
            pw_file="/etc/pve/priv/storage/${current}.pw"
            pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")"
            HB_PBS_NAMES+=("$current")
            HB_PBS_REPOS+=("${username}@${server}:${datastore}")
            HB_PBS_SECRETS+=("$pw_val")
            HB_PBS_SOURCES+=("proxmox")
        fi
    fi
    # Manual configs
    # Format: one "name|user@host:datastore" per line; the secret lives in
    # a companion pbs-pass-<name>.txt file.
    local manual_cfg="$HB_STATE_DIR/pbs-manual-configs.txt"
    if [[ -f "$manual_cfg" ]]; then
        local line name repo sf
        while IFS= read -r line; do
            line="${line%%#*}"
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" ]] && continue
            name="${line%%|*}"; repo="${line##*|}"
            sf="$HB_STATE_DIR/pbs-pass-${name}.txt"
            HB_PBS_NAMES+=("$name"); HB_PBS_REPOS+=("$repo")
            HB_PBS_SECRETS+=("$([[ -f "$sf" ]] && cat "$sf" || echo "")")
            HB_PBS_SOURCES+=("manual")
        done < "$manual_cfg"
    fi
}
# Interactively add a PBS repository that is not configured in Proxmox:
# prompts for name, user, host, datastore and secret, persists the config
# under $HB_STATE_DIR and sets HB_PBS_NAME / HB_PBS_REPOSITORY /
# HB_PBS_SECRET for the caller.  Returns 1 on any cancel.
hb_configure_pbs_manual() {
    local name user host datastore repo secret
    name=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "Configuration name:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "PBS-$(date +%m%d)" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$name" ]] && return 1
    user=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "Username (e.g. root@pam or user@pbs!token):")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "root@pam" 3>&1 1>&2 2>&3) || return 1
    host=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "PBS host or IP address:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$host" ]] && return 1
    datastore=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "Datastore name:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$datastore" ]] && return 1
    secret=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --insecure --passwordbox "$(hb_translate "Password or API token secret:")" \
        "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
    repo="${user}@${host}:${datastore}"
    mkdir -p "$HB_STATE_DIR"
    # Persist "name|repo"; grep -Fxq avoids duplicate lines on re-add.
    local cfg_line="${name}|${repo}"
    local manual_cfg="$HB_STATE_DIR/pbs-manual-configs.txt"
    touch "$manual_cfg"
    grep -Fxq "$cfg_line" "$manual_cfg" || echo "$cfg_line" >> "$manual_cfg"
    # Secret stored separately, readable by root only.
    printf '%s' "$secret" > "$HB_STATE_DIR/pbs-pass-${name}.txt"
    chmod 600 "$HB_STATE_DIR/pbs-pass-${name}.txt"
    HB_PBS_NAME="$name"; HB_PBS_REPOSITORY="$repo"; HB_PBS_SECRET="$secret"
}
# Pick a PBS repository from the auto-detected + manual configs, or add a
# new one manually.  On success sets HB_PBS_NAME, HB_PBS_REPOSITORY
# (exported) and HB_PBS_SECRET, prompting for — and caching — a password
# when none is stored.  Returns 1 on cancel.
hb_select_pbs_repository() {
    hb_collect_pbs_configs
    local menu=() i=1 idx
    for idx in "${!HB_PBS_NAMES[@]}"; do
        local src="${HB_PBS_SOURCES[$idx]}"
        # Fix: separate name, repository and the "no password" marker —
        # previously they were concatenated with no spacing, producing an
        # unreadable menu entry like "namerepo[proxmox]no password".
        local label="${HB_PBS_NAMES[$idx]} ${HB_PBS_REPOS[$idx]} [$src]"
        [[ -z "${HB_PBS_SECRETS[$idx]}" ]] && label+=" ($(hb_translate "no password"))"
        menu+=("$i" "$label"); ((i++))
    done
    menu+=("$i" "$(hb_translate "+ Add new PBS manually")")
    local choice
    choice=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Select PBS repository")" \
        --menu "\n$(hb_translate "Available PBS repositories:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1
    if [[ "$choice" == "$i" ]]; then
        # Last entry = add a new repository interactively.
        hb_configure_pbs_manual || return 1
    else
        local sel=$((choice-1))
        HB_PBS_NAME="${HB_PBS_NAMES[$sel]}"
        export HB_PBS_REPOSITORY="${HB_PBS_REPOS[$sel]}"
        HB_PBS_SECRET="${HB_PBS_SECRETS[$sel]}"
        if [[ -z "$HB_PBS_SECRET" ]]; then
            # No stored secret: ask once and cache it for future runs.
            HB_PBS_SECRET=$(dialog --backtitle "ProxMenux" --title "PBS" \
                --insecure --passwordbox \
                "$(hb_translate "Password for:") $HB_PBS_NAME" \
                "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
            mkdir -p "$HB_STATE_DIR"
            printf '%s' "$HB_PBS_SECRET" > "$HB_STATE_DIR/pbs-pass-${HB_PBS_NAME}.txt"
            chmod 600 "$HB_STATE_DIR/pbs-pass-${HB_PBS_NAME}.txt"
        fi
    fi
}
# Ask whether the PBS backup should be client-side encrypted.  When yes,
# reuse the cached key under $HB_STATE_DIR or create a new one via
# proxmox-backup-client.  Exports HB_PBS_KEYFILE_OPT (CLI fragment) and
# HB_PBS_ENC_PASS (key passphrase); both stay empty when the user opts
# out or key creation fails.  Always returns 0 — encryption is optional.
hb_ask_pbs_encryption() {
    local key_file="$HB_STATE_DIR/pbs-key.conf"
    local enc_pass_file="$HB_STATE_DIR/pbs-encryption-pass.txt"
    export HB_PBS_KEYFILE_OPT=""
    export HB_PBS_ENC_PASS=""
    # User declined encryption: keep the empty defaults.
    dialog --backtitle "ProxMenux" --title "$(hb_translate "Encryption")" \
        --yesno "$(hb_translate "Encrypt this backup with a keyfile?")" \
        "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0
    if [[ -f "$key_file" ]]; then
        # Reuse the existing key (and cached passphrase when present).
        export HB_PBS_KEYFILE_OPT="--keyfile $key_file"
        if [[ -f "$enc_pass_file" ]]; then
            HB_PBS_ENC_PASS="$(<"$enc_pass_file")"
            export HB_PBS_ENC_PASS
        fi
        msg_ok "$(hb_translate "Using existing encryption key:") $key_file"
        return 0
    fi
    # No key — offer to create one
    dialog --backtitle "ProxMenux" --title "$(hb_translate "Encryption")" \
        --yesno "$(hb_translate "No encryption key found. Create one now?")" \
        "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0
    # Double-entry passphrase prompt; loop until the two entries match.
    local pass1 pass2
    while true; do
        pass1=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Encryption passphrase (separate from PBS password):")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 0
        pass2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Confirm encryption passphrase:")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 0
        [[ "$pass1" == "$pass2" ]] && break
        dialog --backtitle "ProxMenux" \
            --msgbox "$(hb_translate "Passphrases do not match. Try again.")" 8 50
    done
    msg_info "$(hb_translate "Creating PBS encryption key...")"
    if PBS_ENCRYPTION_PASSWORD="$pass1" \
        proxmox-backup-client key create "$key_file" >/dev/null 2>&1; then
        # Cache the passphrase (root-only) so scheduled runs can use the key.
        printf '%s' "$pass1" > "$enc_pass_file"
        chmod 600 "$enc_pass_file"
        msg_ok "$(hb_translate "Encryption key created:") $key_file"
        HB_PBS_KEYFILE_OPT="--keyfile $key_file"
        HB_PBS_ENC_PASS="$pass1"
        local key_warn_msg
        key_warn_msg="$(hb_translate "IMPORTANT: Back up this key file. Without it the backup cannot be restored.")"$'\n\n'"$(hb_translate "Key:") $key_file"
        dialog --backtitle "ProxMenux" --msgbox \
            "$key_warn_msg" \
            10 74
    else
        msg_error "$(hb_translate "Failed to create encryption key. Backup will proceed without encryption.")"
    fi
}
# ==========================================================
# BORG
# ==========================================================
# Locate a usable borg binary.  Preference order: one on PATH, then a
# previously cached download, then a fresh download of the pinned upstream
# release verified against HB_BORG_LINUX64_SHA256.  Prints the binary
# path/name on stdout; returns 1 when no verified binary can be provided.
hb_ensure_borg() {
    command -v borg >/dev/null 2>&1 && { echo "borg"; return 0; }
    local appimage="$HB_STATE_DIR/borg"
    local tmp_file
    [[ -x "$appimage" ]] && { echo "$appimage"; return 0; }
    command -v sha256sum >/dev/null 2>&1 || {
        msg_error "$(hb_translate "sha256sum not found. Cannot verify Borg binary.")"
        return 1
    }
    msg_info "$(hb_translate "Borg not found. Downloading borg") ${HB_BORG_VERSION}..."
    mkdir -p "$HB_STATE_DIR"
    # Download to a temp name so a partial file never becomes the cached binary.
    tmp_file=$(mktemp "$HB_STATE_DIR/.borg-download.XXXXXX") || return 1
    if wget -qO "$tmp_file" "$HB_BORG_LINUX64_URL"; then
        # Fix: GNU sha256sum -c requires TWO characters between the digest
        # and the file name ("HASH  FILE" or "HASH *FILE").  The previous
        # single-space line was rejected as malformed, so checksum
        # verification always failed even for a good download.
        if printf '%s  %s\n' "$HB_BORG_LINUX64_SHA256" "$tmp_file" | sha256sum -c - >/dev/null 2>&1; then
            mv -f "$tmp_file" "$appimage"
        else
            rm -f "$tmp_file"
            msg_error "$(hb_translate "Borg binary checksum verification failed.")"
            return 1
        fi
        chmod +x "$appimage"
        msg_ok "$(hb_translate "Borg ready.")"
        echo "$appimage"; return 0
    fi
    rm -f "$tmp_file"
    msg_error "$(hb_translate "Failed to download Borg.")"
    return 1
}
# Initialize a Borg repository unless it already exists.
#   $1 = borg binary, $2 = repository URL/path, $3 = encryption mode
# Supports both Borg 2.x ("repo-create") and the legacy "init" command.
hb_borg_init_if_needed() {
    local borg_bin="$1" repo="$2" encrypt_mode="$3"
    # A successful listing means the repository is already initialized.
    if "$borg_bin" list "$repo" >/dev/null 2>&1; then
        return 0
    fi
    if "$borg_bin" help repo-create >/dev/null 2>&1; then
        # Borg >= 2.0 renamed 'init' to 'repo-create'.
        "$borg_bin" repo-create -e "$encrypt_mode" "$repo"
    else
        "$borg_bin" init --encryption="$encrypt_mode" "$repo"
    fi
}
# Prepare BORG_PASSPHRASE / BORG_ENCRYPT_MODE for a Borg run.
# Reuses the passphrase cached under $HB_STATE_DIR when present;
# otherwise asks whether to encrypt and, if so, prompts twice for a new
# passphrase and caches it (root-only).  Defaults to unencrypted
# ("none") when the user declines.  Returns 1 only on prompt cancel.
hb_prepare_borg_passphrase() {
    local pass_file="$HB_STATE_DIR/borg-pass.txt"
    BORG_ENCRYPT_MODE="none"
    unset BORG_PASSPHRASE
    if [[ -f "$pass_file" ]]; then
        # Cached passphrase found: reuse it and assume a repokey repo.
        export BORG_PASSPHRASE
        BORG_PASSPHRASE="$(<"$pass_file")"
        BORG_ENCRYPT_MODE="repokey"
        return 0
    fi
    # User declined encryption: keep mode "none".
    dialog --backtitle "ProxMenux" --title "$(hb_translate "Borg encryption")" \
        --yesno "$(hb_translate "Encrypt this Borg repository?")" \
        "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0
    # Double-entry prompt; loop until both entries match.
    local pass1 pass2
    while true; do
        pass1=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Borg passphrase:")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
        pass2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Confirm Borg passphrase:")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
        [[ "$pass1" == "$pass2" ]] && break
        dialog --backtitle "ProxMenux" \
            --msgbox "$(hb_translate "Passphrases do not match.")" 8 50
    done
    mkdir -p "$HB_STATE_DIR"
    # Cache for unattended scheduled runs; readable by root only.
    printf '%s' "$pass1" > "$pass_file"
    chmod 600 "$pass_file"
    export BORG_PASSPHRASE="$pass1"
    export BORG_ENCRYPT_MODE="repokey"
}
# Interactively choose a Borg repository location (local dir, mounted
# external disk, or remote over SSH) and write the resulting repo string
# into the caller's variable named by $1 (nameref).  May export BORG_RSH
# for a custom SSH key.  Returns 1 on any cancel.
hb_select_borg_repo() {
    local _borg_repo_var="$1"
    local -n _borg_repo_ref="$_borg_repo_var"
    local type
    type=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Borg repository location")" \
        --menu "\n$(hb_translate "Select repository destination:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "local" "$(hb_translate 'Local directory')" \
        "usb" "$(hb_translate 'Mounted external disk')" \
        "remote" "$(hb_translate 'Remote server via SSH')" \
        3>&1 1>&2 2>&3) || return 1
    # Clear any SSH override from a previous selection.
    unset BORG_RSH
    case "$type" in
        local)
            _borg_repo_ref=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Borg repository path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup/borgbackup" \
                3>&1 1>&2 2>&3) || return 1
            mkdir -p "$_borg_repo_ref" 2>/dev/null || true
            ;;
        usb)
            # Validate the mount point, then use a borgbackup subdirectory.
            local mnt
            mnt=$(hb_prompt_mounted_path "/mnt/backup") || return 1
            _borg_repo_ref="$mnt/borgbackup"
            mkdir -p "$_borg_repo_ref" 2>/dev/null || true
            ;;
        remote)
            local user host rpath ssh_key
            user=$(dialog --backtitle "ProxMenux" --inputbox "$(hb_translate "SSH user:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "root" 3>&1 1>&2 2>&3) || return 1
            host=$(dialog --backtitle "ProxMenux" --inputbox "$(hb_translate "SSH host or IP:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1
            rpath=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Remote repository path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup/borgbackup" \
                3>&1 1>&2 2>&3) || return 1
            # Optional dedicated SSH key, passed to borg via BORG_RSH.
            if dialog --backtitle "ProxMenux" \
                --yesno "$(hb_translate "Use a custom SSH key?")" \
                "$HB_UI_YESNO_H" "$HB_UI_YESNO_W"; then
                ssh_key=$(dialog --backtitle "ProxMenux" \
                    --fselect "$HOME/.ssh/" 12 70 3>&1 1>&2 2>&3) || return 1
                export BORG_RSH="ssh -i $ssh_key -o StrictHostKeyChecking=accept-new"
            fi
            _borg_repo_ref="ssh://$user@$host/$rpath"
            ;;
    esac
}
# ==========================================================
# COMMON PROMPTS
# ==========================================================
# Sanitize a value returned by dialog: drop CR/LF characters, then strip
# leading and trailing whitespace.  Prints the cleaned value (no newline).
hb_trim_dialog_value() {
    local cleaned="${1//$'\r'/}"
    cleaned="${cleaned//$'\n'/}"
    # Remove leading, then trailing, runs of whitespace via pattern expansion.
    cleaned="${cleaned#"${cleaned%%[![:space:]]*}"}"
    cleaned="${cleaned%"${cleaned##*[![:space:]]}"}"
    printf '%s' "$cleaned"
}
# Ask for the mount point of an external disk ($1 = default suggestion).
# Validates that the directory exists and warns (with override) when it
# is not a registered mount point.  Prints the path; returns 1 on cancel
# or invalid input.
hb_prompt_mounted_path() {
    local default_path="${1:-/mnt/backup}"
    local out
    out=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Mounted disk path")" \
        --inputbox "$(hb_translate "Path where the external disk is mounted:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$default_path" 3>&1 1>&2 2>&3) || return 1
    out=$(hb_trim_dialog_value "$out")
    [[ -n "$out" && -d "$out" ]] || { msg_error "$(hb_translate "Path does not exist.")"; return 1; }
    # Not a mount point: writing there would land on the root filesystem,
    # so require explicit confirmation.
    if ! mountpoint -q "$out" 2>/dev/null; then
        dialog --backtitle "ProxMenux" --title "$(hb_translate "Warning")" \
            --yesno "$(hb_translate "This path is not a registered mount point. Use it anyway?")" \
            "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 1
    fi
    echo "$out"
}
# Ask where to save a backup (vzdump default, /backup, custom dir, or a
# mounted external disk), creating the directory if needed.  Prints the
# chosen path; returns 1 on cancel or when it cannot be created.
hb_prompt_dest_dir() {
    local selection out
    selection=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Select destination")" \
        --menu "\n$(hb_translate "Choose where to save the backup:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "vzdump" "$(hb_translate '/var/lib/vz/dump (Proxmox default vzdump path)')" \
        "backup" "$(hb_translate '/backup')" \
        "local" "$(hb_translate 'Custom local directory')" \
        "usb" "$(hb_translate 'Mounted external disk')" \
        3>&1 1>&2 2>&3) || return 1
    case "$selection" in
        vzdump) out="/var/lib/vz/dump" ;;
        backup) out="/backup" ;;
        local)
            out=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Enter directory path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup" 3>&1 1>&2 2>&3) || return 1
            ;;
        usb) out=$(hb_prompt_mounted_path "/mnt/backup") || return 1 ;;
    esac
    out=$(hb_trim_dialog_value "$out")
    [[ -n "$out" ]] || return 1
    mkdir -p "$out" || { msg_error "$(hb_translate "Cannot create:") $out"; return 1; }
    echo "$out"
}
# Ask where existing backup archives are stored (for restore).  Unlike
# hb_prompt_dest_dir, the directory must already exist.  Prints the path;
# returns 1 on cancel or when the directory is missing.
hb_prompt_restore_source_dir() {
    local choice out
    choice=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Restore source location")" \
        --menu "\n$(hb_translate "Where are the backup archives stored?")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "vzdump" "$(hb_translate '/var/lib/vz/dump (Proxmox default)')" \
        "backup" "$(hb_translate '/backup')" \
        "usb" "$(hb_translate 'Mounted external disk')" \
        "custom" "$(hb_translate 'Custom path')" \
        3>&1 1>&2 2>&3) || return 1
    case "$choice" in
        vzdump) out="/var/lib/vz/dump" ;;
        backup) out="/backup" ;;
        usb) out=$(hb_prompt_mounted_path "/mnt/backup") || return 1 ;;
        custom)
            out=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Enter path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup" 3>&1 1>&2 2>&3) || return 1
            ;;
    esac
    out=$(hb_trim_dialog_value "$out")
    # Source must exist — never create it on the restore side.
    [[ -n "$out" && -d "$out" ]] || {
        msg_error "$(hb_translate "Directory does not exist.")"
        return 1
    }
    echo "$out"
}
# Let the user pick a backup archive under $1 (optional $2 = menu title).
# Scans for *.tar.zst / *.tar.gz / *.tar files, newest first, capped at
# 200 entries, and shows path + mtime + size.  Prints the chosen file's
# path; returns 1 on cancel or when nothing is found.
hb_prompt_local_archive() {
    local base_dir="$1"
    local title="${2:-$(hb_translate "Select backup archive")}"
    local -a rows=() files=() menu=()
    # Single find pass using -printf: no per-file stat subprocesses.
    # maxdepth 6 catches nested backup layouts commonly used in /var/lib/vz/dump.
    mapfile -t rows < <(
        find "$base_dir" -maxdepth 6 -type f \
            \( -name '*.tar.zst' -o -name '*.tar.gz' -o -name '*.tar' \) \
            -printf '%T@|%s|%p\n' 2>/dev/null \
        | sort -t'|' -k1,1nr \
        | head -200
    )
    if [[ ${#rows[@]} -eq 0 ]]; then
        local no_backups_msg
        no_backups_msg="$(hb_translate "No backup archives were found in:") $base_dir"$'\n\n'"$(hb_translate "Select another source path and try again.")"
        dialog --backtitle "ProxMenux" \
            --title "$(hb_translate "No backups found")" \
            --msgbox "$no_backups_msg" \
            10 78 || true
        return 1
    fi
    # Split each "epoch|size|path" row and build the dialog menu entries.
    local i=1 row epoch size path date_str size_str label
    for row in "${rows[@]}"; do
        epoch="${row%%|*}"; row="${row#*|}"
        size="${row%%|*}"; path="${row#*|}"
        epoch="${epoch%%.*}" # drop sub-second fraction from %T@
        date_str=$(date -d "@$epoch" '+%Y-%m-%d %H:%M' 2>/dev/null || echo "-")
        size_str=$(numfmt --to=iec-i --suffix=B "$size" 2>/dev/null || echo "${size}B")
        label="${path#$base_dir/} $date_str $size_str"
        files+=("$path"); menu+=("$i" "$label"); ((i++))
    done
    local choice
    choice=$(dialog --backtitle "ProxMenux" --title "$title" \
        --menu "\n$(hb_translate "Detected backups — newest first:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1
    echo "${files[$((choice-1))]}"
}
# ==========================================================
# UTILITIES
# ==========================================================
# Format a duration in seconds as a short human-readable string:
# "<60s" -> "Ns", "<1h" -> "Nm Ns", otherwise "Nh Nm".
hb_human_elapsed() {
    local total="$1"
    if (( total >= 3600 )); then
        printf '%dh %dm' "$((total / 3600))" "$(( (total % 3600) / 60 ))"
    elif (( total >= 60 )); then
        printf '%dm %ds' "$((total / 60))" "$((total % 60))"
    else
        printf '%ds' "$total"
    fi
}
# Print a human-readable size for a file or directory, or "-" when the
# path does not exist.  Files use an exact byte count via stat (IEC
# formatted); directories fall back to du.
hb_file_size() {
    local target="$1"
    if [[ -f "$target" ]]; then
        local bytes
        bytes=$(stat -c %s "$target" 2>/dev/null || echo 0)
        # numfmt may be unavailable on minimal systems; du is the fallback.
        numfmt --to=iec-i --suffix=B "$bytes" 2>/dev/null \
            || du -sh "$target" 2>/dev/null | awk '{print $1}'
    elif [[ -d "$target" ]]; then
        du -sh "$target" 2>/dev/null | awk '{print $1}'
    else
        echo "-"
    fi
}
# Display a log file in a scrollable dialog textbox.
#   $1 = log file path, $2 = optional window title.
# Silently returns 0 when the file is missing or empty.
hb_show_log() {
    local log_path="$1" box_title="${2:-$(hb_translate "Operation log")}"
    # Only show regular, non-empty files.
    if [[ ! -f "$log_path" || ! -s "$log_path" ]]; then
        return 0
    fi
    dialog --backtitle "ProxMenux" --exit-label "OK" \
        --title "$box_title" --textbox "$log_path" 26 110 || true
}
# Ensure command $1 is available, installing package $2 (defaults to the
# command name) via apt when it is missing.  Exit status reflects whether
# the command exists after the attempt.
hb_require_cmd() {
    local cmd="$1" pkg="${2:-$1}"
    if command -v "$cmd" >/dev/null 2>&1; then
        return 0
    fi
    # Best-effort install on apt-based systems (Proxmox is Debian-based).
    if command -v apt-get >/dev/null 2>&1; then
        msg_warn "$(hb_translate "Installing dependency:") $pkg"
        apt-get update -qq >/dev/null 2>&1 && apt-get install -y "$pkg" >/dev/null 2>&1
    fi
    command -v "$cmd" >/dev/null 2>&1
}
@@ -0,0 +1,243 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Run Scheduled Host Backup Job
# ==========================================================
# Abort on use of unset variables — job env files must define everything.
set -u
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)"
LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts"
LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
# Locate utils.sh: prefer a sibling checkout (development), then the
# installed scripts dir, then the base state dir.
if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then
    LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL"
    UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
elif [[ ! -f "$UTILS_FILE" ]]; then
    UTILS_FILE="$BASE_DIR/utils.sh"
fi
if [[ -f "$UTILS_FILE" ]]; then
    # shellcheck source=/dev/null
    source "$UTILS_FILE"
else
    echo "ERROR: utils.sh not found" >&2
    exit 1
fi
# Shared backup helpers: same two-location lookup as utils.sh.
LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh"
[[ ! -f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh"
if [[ -f "$LIB_FILE" ]]; then
    # shellcheck source=/dev/null
    source "$LIB_FILE"
else
    echo "ERROR: lib_host_backup_common.sh not found" >&2
    exit 1
fi
# Working directories, overridable via environment for testing.
JOBS_DIR="${PMX_BACKUP_JOBS_DIR:-/var/lib/proxmenux/backup-jobs}"
LOG_DIR="${PMX_BACKUP_LOG_DIR:-/var/log/proxmenux/backup-jobs}"
LOCK_DIR="${PMX_BACKUP_LOCK_DIR:-/var/lock}"
mkdir -p "$JOBS_DIR" "$LOG_DIR" >/dev/null 2>&1 || true
# Retention for the local backend: keep the newest KEEP_LAST archives
# matching "<job_id>-*.<ext>" in $2 and delete the rest.  A missing,
# non-numeric or zero KEEP_LAST disables pruning entirely.
_sb_prune_local() {
    local job_id="$1" dest_dir="$2" ext="$3"
    local keep="${KEEP_LAST:-0}"
    # Prune only with a positive numeric keep-count.
    { [[ "$keep" =~ ^[0-9]+$ ]] && (( keep > 0 )); } || return 0
    local -a archives=()
    # Timestamped names sort lexicographically, so reverse sort = newest first.
    mapfile -t archives < <(find "$dest_dir" -maxdepth 1 -type f -name "${job_id}-*.${ext}" | sort -r)
    local total=${#archives[@]}
    (( total <= keep )) && return 0
    local pos
    for (( pos = keep; pos < total; pos++ )); do
        rm -f "${archives[$pos]}" || true
    done
}
# Local backend: pack the staging tree into a compressed tar archive in
# $4, preferring zstd and falling back to gzip, then apply local
# retention.  Prints "LOCAL_ARCHIVE=<path>" on success, returns 1 on
# any packing failure.
_sb_run_local() {
    local stage_root="$1" job_id="$2" ts="$3" dest_dir="$4"
    local ext="${LOCAL_ARCHIVE_EXT:-tar.zst}"
    local archive="${dest_dir}/${job_id}-${ts}.${ext}"
    mkdir -p "$dest_dir" || return 1
    if [[ "$ext" == "tar.zst" ]] && command -v zstd >/dev/null 2>&1; then
        tar --zstd -cf "$archive" -C "$stage_root" . >/dev/null 2>&1 || return 1
    else
        # zstd unavailable (or another extension requested): use gzip.
        ext="tar.gz"
        archive="${dest_dir}/${job_id}-${ts}.${ext}"
        tar -czf "$archive" -C "$stage_root" . >/dev/null 2>&1 || return 1
    fi
    _sb_prune_local "$job_id" "$dest_dir" "$ext"
    echo "LOCAL_ARCHIVE=$archive"
    return 0
}
# Borg backend for scheduled jobs.  Requires BORG_REPO and
# BORG_PASSPHRASE in the environment (sourced from the job env file).
# Creates archive $2 from the staged rootfs/metadata dirs, then runs a
# best-effort prune using the KEEP_* retention variables.  Prints
# "BORG_ARCHIVE=<name>" on success.
_sb_run_borg() {
    local stage_root="$1"
    local archive_name="$2"
    local borg_bin repo passphrase
    borg_bin=$(hb_ensure_borg) || return 1
    repo="${BORG_REPO:-}"
    passphrase="${BORG_PASSPHRASE:-}"
    [[ -z "$repo" || -z "$passphrase" ]] && return 1
    export BORG_PASSPHRASE="$passphrase"
    if ! hb_borg_init_if_needed "$borg_bin" "$repo" "${BORG_ENCRYPT_MODE:-none}" >/dev/null 2>&1; then
        return 1
    fi
    # cd first so archive members are the relative rootfs/ and metadata/ trees.
    (cd "$stage_root" && "$borg_bin" create --stats \
        "${repo}::${archive_name}" rootfs metadata) >/dev/null 2>&1 || return 1
    # Retention is best-effort: a prune failure must not fail the backup.
    # ${VAR:+...} only emits each --keep-* option when the variable is set.
    "$borg_bin" prune -v --list "$repo" \
        ${KEEP_LAST:+--keep-last "$KEEP_LAST"} \
        ${KEEP_HOURLY:+--keep-hourly "$KEEP_HOURLY"} \
        ${KEEP_DAILY:+--keep-daily "$KEEP_DAILY"} \
        ${KEEP_WEEKLY:+--keep-weekly "$KEEP_WEEKLY"} \
        ${KEEP_MONTHLY:+--keep-monthly "$KEEP_MONTHLY"} \
        ${KEEP_YEARLY:+--keep-yearly "$KEEP_YEARLY"} \
        >/dev/null 2>&1 || true
    echo "BORG_ARCHIVE=${archive_name}"
    return 0
}
# PBS backend for scheduled jobs.  Requires PBS_REPOSITORY and
# PBS_PASSWORD in the environment; optional PBS_KEYFILE /
# PBS_ENCRYPTION_PASSWORD enable client-side encryption.  Pushes the
# staged rootfs as a host-type snapshot, best-effort prunes the group
# with the KEEP_* variables, and prints "PBS_SNAPSHOT=host/<id>/<epoch>".
_sb_run_pbs() {
    local stage_root="$1"
    local backup_id="$2"
    local epoch="$3"
    # Fix: validate the required environment BEFORE referencing it.  This
    # script runs under `set -u`, so expanding $PBS_REPOSITORY while
    # building the command array aborted the whole run with an
    # "unbound variable" error when it was unset, instead of letting this
    # backend fail cleanly with rc=1.
    [[ -z "${PBS_REPOSITORY:-}" || -z "${PBS_PASSWORD:-}" ]] && return 1
    local -a cmd=(
        proxmox-backup-client backup
        "hostcfg.pxar:${stage_root}/rootfs"
        --repository "$PBS_REPOSITORY"
        --backup-type host
        --backup-id "$backup_id"
        --backup-time "$epoch"
    )
    if [[ -n "${PBS_KEYFILE:-}" ]]; then
        cmd+=(--keyfile "$PBS_KEYFILE")
    fi
    # Credentials are passed via the environment, never on the command line.
    env PBS_PASSWORD="$PBS_PASSWORD" PBS_ENCRYPTION_PASSWORD="${PBS_ENCRYPTION_PASSWORD:-}" \
        "${cmd[@]}" >/dev/null 2>&1 || return 1
    # Best effort prune for PBS group.
    proxmox-backup-client prune "host/${backup_id}" --repository "$PBS_REPOSITORY" \
        ${KEEP_LAST:+--keep-last "$KEEP_LAST"} \
        ${KEEP_HOURLY:+--keep-hourly "$KEEP_HOURLY"} \
        ${KEEP_DAILY:+--keep-daily "$KEEP_DAILY"} \
        ${KEEP_WEEKLY:+--keep-weekly "$KEEP_WEEKLY"} \
        ${KEEP_MONTHLY:+--keep-monthly "$KEEP_MONTHLY"} \
        ${KEEP_YEARLY:+--keep-yearly "$KEEP_YEARLY"} \
        >/dev/null 2>&1 || true
    echo "PBS_SNAPSHOT=host/${backup_id}/${epoch}"
    return 0
}
# Run one scheduled backup job end to end.
#   $1 = job id; config is sourced from $JOBS_DIR/<id>.env (defines
#        BACKEND, PROFILE_MODE, retention and backend-specific vars).
# Writes a per-run log and a machine-readable <id>-last.status summary,
# serializes concurrent runs of the same job via flock, and exits 0/1.
main() {
    local job_id="${1:-}"
    [[ -z "$job_id" ]] && { echo "Usage: $0 <job_id>" >&2; exit 1; }
    local job_file="${JOBS_DIR}/${job_id}.env"
    [[ -f "$job_file" ]] || { echo "Job not found: $job_id" >&2; exit 1; }
    # shellcheck source=/dev/null
    source "$job_file"
    # Per-job lock: fd 9 stays open for the lifetime of the process, so the
    # lock is released automatically on exit.  Skipped when flock is absent.
    local lock_file="${LOCK_DIR}/proxmenux-backup-${job_id}.lock"
    if command -v flock >/dev/null 2>&1; then
        exec 9>"$lock_file" || exit 1
        if ! flock -n 9; then
            echo "Another run is active for job ${job_id}" >&2
            exit 1
        fi
    fi
    local ts log_file stage_root summary_file
    ts="$(date +%Y%m%d_%H%M%S)"
    log_file="${LOG_DIR}/${job_id}-${ts}.log"
    # Fixed name: always reflects the most recent run (shown by _show_jobs).
    summary_file="${LOG_DIR}/${job_id}-last.status"
    stage_root="$(mktemp -d /tmp/proxmenux-sched-stage.XXXXXX)"
    {
        echo "JOB_ID=${job_id}"
        echo "RUN_AT=$(date -Iseconds)"
        echo "BACKEND=${BACKEND:-}"
        echo "PROFILE_MODE=${PROFILE_MODE:-default}"
    } >"$summary_file"
    {
        echo "=== Scheduled backup job ${job_id} started at $(date -Iseconds) ==="
        echo "Backend: ${BACKEND:-}"
    } >"$log_file"
    # Resolve the path profile: custom list from <id>.paths, else defaults.
    local -a paths=()
    if [[ "${PROFILE_MODE:-default}" == "custom" && -f "${JOBS_DIR}/${job_id}.paths" ]]; then
        mapfile -t paths < "${JOBS_DIR}/${job_id}.paths"
    else
        mapfile -t paths < <(hb_default_profile_paths)
    fi
    if [[ ${#paths[@]} -eq 0 ]]; then
        echo "No paths configured for job" >>"$log_file"
        echo "RESULT=failed" >>"$summary_file"
        rm -rf "$stage_root"
        exit 1
    fi
    # Stage the selected paths, then hand off to the configured backend.
    hb_prepare_staging "$stage_root" "${paths[@]}" >>"$log_file" 2>&1
    local rc=1
    case "${BACKEND:-}" in
        local)
            _sb_run_local "$stage_root" "$job_id" "$ts" "${LOCAL_DEST_DIR:-/var/lib/vz/dump}" >>"$log_file" 2>&1
            rc=$?
            ;;
        borg)
            _sb_run_borg "$stage_root" "${job_id}-${ts}" >>"$log_file" 2>&1
            rc=$?
            ;;
        pbs)
            _sb_run_pbs "$stage_root" "${PBS_BACKUP_ID:-hostcfg-$(hostname)}" "$(date +%s)" >>"$log_file" 2>&1
            rc=$?
            ;;
        *)
            echo "Unknown backend: ${BACKEND:-}" >>"$log_file"
            rc=1
            ;;
    esac
    rm -rf "$stage_root"
    if [[ $rc -eq 0 ]]; then
        echo "RESULT=ok" >>"$summary_file"
        echo "LOG_FILE=${log_file}" >>"$summary_file"
        echo "=== Job finished OK at $(date -Iseconds) ===" >>"$log_file"
        exit 0
    else
        echo "RESULT=failed" >>"$summary_file"
        echo "LOG_FILE=${log_file}" >>"$summary_file"
        echo "=== Job finished with errors at $(date -Iseconds) ===" >>"$log_file"
        exit 1
    fi
}
# Script entry point — forwards the job id from argv.
main "$@"
@@ -0,0 +1,284 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Backup/Restore Test Matrix (non-destructive)
# ==========================================================
# Exercises the backup/restore helper scripts in a throwaway
# sandbox under /tmp; it does not touch live system state.
set -u
# Resolve sibling scripts relative to this file so the suite can be
# launched from any working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
RUNNER="${SCRIPT_DIR}/run_scheduled_backup.sh"
APPLY_ONBOOT="${SCRIPT_DIR}/apply_pending_restore.sh"
HOST_SCRIPT="${SCRIPT_DIR}/backup_host.sh"
LIB_SCRIPT="${SCRIPT_DIR}/lib_host_backup_common.sh"
SCHED_SCRIPT="${SCRIPT_DIR}/backup_scheduler.sh"
# --keep-tmp preserves the sandbox after the run for debugging.
KEEP_TMP=0
if [[ "${1:-}" == "--keep-tmp" ]]; then
KEEP_TMP=1
fi
TMP_ROOT="$(mktemp -d /tmp/proxmenux-brtest.XXXXXX)"
REPORT_FILE="/tmp/proxmenux-backup-restore-test-$(date +%Y%m%d_%H%M%S).log"
# Global counters, updated by pass()/fail()/skip() below.
PASS=0
FAIL=0
SKIP=0
log() {
    # Print a message to stdout and append it to the report file.
    # Callers pass literal "\n" escapes (e.g. log "\n=== x ===");
    # plain `echo "$*"` printed those backslashes verbatim, so use
    # printf '%b' to expand backslash escape sequences as intended.
    printf '%b\n' "$*" | tee -a "$REPORT_FILE"
}
pass() {
    # Record one passing check and echo it into the report.
    log "[PASS] $*"
    PASS=$((PASS + 1))
}
fail() {
    # Record one failing check and echo it into the report.
    log "[FAIL] $*"
    FAIL=$((FAIL + 1))
}
skip() {
    # Record one skipped check and echo it into the report.
    log "[SKIP] $*"
    SKIP=$((SKIP + 1))
}
cleanup() {
    # Remove the sandbox on exit unless --keep-tmp was requested.
    if [[ "$KEEP_TMP" -ne 0 ]]; then
        log "[INFO] Temp root preserved: $TMP_ROOT"
    else
        rm -rf "$TMP_ROOT"
    fi
}
trap cleanup EXIT
assert_file_contains() {
    # Succeed (0) iff the file exists and grep finds the pattern in it.
    local file="$1"
    local needle="$2"
    if [[ ! -f "$file" ]]; then
        return 1
    fi
    if grep -q "$needle" "$file"; then
        return 0
    fi
    return 1
}
run_cmd_expect_ok() {
    # Run a command (output captured into the report); PASS when it
    # exits zero, FAIL otherwise. Returns the recorded outcome.
    local desc="$1"
    shift
    if ! "$@" >>"$REPORT_FILE" 2>&1; then
        fail "$desc"
        return 1
    fi
    pass "$desc"
    return 0
}
run_cmd_expect_fail() {
    # Run a command (output captured into the report); PASS only when
    # it exits non-zero. Returns the recorded outcome.
    local desc="$1"
    shift
    if "$@" >>"$REPORT_FILE" 2>&1; then
        fail "$desc"
        return 1
    else
        pass "$desc"
        return 0
    fi
}
syntax_tests() {
    # bash -n parses each helper script without executing it, catching
    # syntax errors early. The label is the script's basename.
    log "\n=== Syntax checks ==="
    local script
    for script in "$HOST_SCRIPT" "$LIB_SCRIPT" "$SCHED_SCRIPT" "$RUNNER" "$APPLY_ONBOOT"; do
        run_cmd_expect_ok "bash -n ${script##*/}" bash -n "$script"
    done
}
scheduler_e2e_tests() {
# End-to-end run of the scheduled-backup runner against sandboxed
# job/log/lock/archive directories (injected via PMX_BACKUP_* env vars).
log "\n=== Scheduler E2E (sandbox) ==="
# The runner relies on mapfile; bail out gracefully on bash < 4.
if ! help mapfile >/dev/null 2>&1; then
skip "Scheduler E2E skipped: current bash does not provide mapfile (requires bash >= 4)."
return
fi
local jobs_dir="$TMP_ROOT/backup-jobs"
local logs_dir="$TMP_ROOT/backup-jobs-logs"
local lock_dir="$TMP_ROOT/locks"
local archives_dir="$TMP_ROOT/archives"
mkdir -p "$jobs_dir" "$logs_dir" "$lock_dir" "$archives_dir"
# Job t1: valid local-backend job with KEEP_LAST=2 retention.
cat > "$jobs_dir/t1.env" <<EOJ
JOB_ID=t1
BACKEND=local
PROFILE_MODE=custom
LOCAL_DEST_DIR=${archives_dir}
LOCAL_ARCHIVE_EXT=tar.gz
KEEP_LAST=2
KEEP_HOURLY=0
KEEP_DAILY=0
KEEP_WEEKLY=0
KEEP_MONTHLY=0
KEEP_YEARLY=0
EOJ
# Custom profile: back up two small world-readable files.
cat > "$jobs_dir/t1.paths" <<EOP
/etc/hosts
/etc/resolv.conf
EOP
# Run the job three times (1 s apart so timestamps differ); with
# KEEP_LAST=2 the retention pass must prune down to two archives.
local i
for i in 1 2 3; do
if PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \
bash "$RUNNER" t1 >>"$REPORT_FILE" 2>&1; then
:
else
fail "Runner execution #$i for t1"
return
fi
sleep 1
done
local archive_count
archive_count="$(find "$archives_dir" -maxdepth 1 -type f -name 't1-*.tar.gz' | wc -l | tr -d ' ')"
if [[ "$archive_count" == "2" ]]; then
pass "Retention KEEP_LAST=2 keeps exactly 2 archives"
else
fail "Retention expected 2 archives, got $archive_count"
fi
if assert_file_contains "$logs_dir/t1-last.status" "RESULT=ok"; then
pass "t1-last.status reports RESULT=ok"
else
fail "t1-last.status does not report RESULT=ok"
fi
# Job tbad: unknown backend must make the runner exit non-zero and
# record RESULT=failed in its status file.
cat > "$jobs_dir/tbad.env" <<EOJ
JOB_ID=tbad
BACKEND=invalid
PROFILE_MODE=custom
KEEP_LAST=1
EOJ
echo "/etc/hosts" > "$jobs_dir/tbad.paths"
run_cmd_expect_fail "Invalid backend fails" \
env PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \
bash "$RUNNER" tbad
if assert_file_contains "$logs_dir/tbad-last.status" "RESULT=failed"; then
pass "tbad-last.status reports RESULT=failed"
else
fail "tbad-last.status does not report RESULT=failed"
fi
# Job tempty: an empty paths file must also fail the run.
cat > "$jobs_dir/tempty.env" <<EOJ
JOB_ID=tempty
BACKEND=local
PROFILE_MODE=custom
LOCAL_DEST_DIR=${archives_dir}
LOCAL_ARCHIVE_EXT=tar.gz
KEEP_LAST=1
EOJ
: > "$jobs_dir/tempty.paths"
run_cmd_expect_fail "Empty paths fails" \
env PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \
bash "$RUNNER" tempty
if assert_file_contains "$logs_dir/tempty-last.status" "RESULT=failed"; then
pass "tempty-last.status reports RESULT=failed"
else
fail "tempty-last.status does not report RESULT=failed"
fi
}
pending_restore_tests() {
# End-to-end run of apply_pending_restore.sh against a sandboxed
# pending-restore tree; all target dirs are injected via PMX_RESTORE_*.
log "\n=== Pending restore E2E (sandbox) ==="
local pending_base="$TMP_ROOT/restore-pending"
local logs_dir="$TMP_ROOT/restore-logs"
local target_root="$TMP_ROOT/target"
local pre_backup_base="$TMP_ROOT/pre-restore"
local recovery_base="$TMP_ROOT/recovery"
# Fake staged restore "r1": a regular file, a cluster (/etc/pve) file
# and a ZFS cache file, plus an existing file in the target to be
# overwritten.
mkdir -p "$pending_base/r1/rootfs/etc/pve" "$pending_base/r1/rootfs/etc/zfs" "$pending_base/r1/rootfs/etc" "$target_root/etc"
echo "new-value" > "$pending_base/r1/rootfs/etc/test.conf"
echo "cluster-data" > "$pending_base/r1/rootfs/etc/pve/cluster.cfg"
echo "zfs-data" > "$pending_base/r1/rootfs/etc/zfs/zpool.cache"
echo "old-value" > "$target_root/etc/test.conf"
cat > "$pending_base/r1/apply-on-boot.list" <<EOL
etc/test.conf
etc/pve/cluster.cfg
etc/zfs/zpool.cache
EOL
cat > "$pending_base/r1/plan.env" <<EOP
HB_RESTORE_INCLUDE_ZFS=0
EOP
# "current" symlink marks r1 as the restore to apply on next boot.
ln -sfn "$pending_base/r1" "$pending_base/current"
if PMX_RESTORE_PENDING_BASE="$pending_base" PMX_RESTORE_LOG_DIR="$logs_dir" \
PMX_RESTORE_DEST_PREFIX="$target_root" PMX_RESTORE_PRE_BACKUP_BASE="$pre_backup_base" \
PMX_RESTORE_RECOVERY_BASE="$recovery_base" \
bash "$APPLY_ONBOOT" >>"$REPORT_FILE" 2>&1; then
pass "apply_pending_restore completes"
else
fail "apply_pending_restore completes"
return
fi
# Regular files are restored in place...
if assert_file_contains "$target_root/etc/test.conf" "new-value"; then
pass "Regular file restored into target prefix"
else
fail "Regular file was not restored"
fi
# ...but cluster-managed files must never be written live; they go to
# the recovery directory instead.
if [[ -e "$target_root/etc/pve/cluster.cfg" ]]; then
fail "Cluster file should not be restored live"
else
pass "Cluster file skipped from live restore"
fi
if find "$recovery_base" -type f -name cluster.cfg 2>/dev/null | grep -q .; then
pass "Cluster file extracted to recovery directory"
else
fail "Cluster file not found in recovery directory"
fi
# Completion bookkeeping: state file marked, "current" link removed.
if assert_file_contains "$pending_base/completed/r1/state" "completed"; then
pass "Pending restore state marked completed"
else
fail "Pending restore state not marked completed"
fi
if [[ -e "$pending_base/current" ]]; then
fail "current symlink should be removed"
else
pass "current symlink removed"
fi
}
main() {
    # Print a header, run the three suites, then summarize the
    # counters. Exit status is non-zero when any check failed.
    log "ProxMenux backup/restore test matrix"
    log "Report: $REPORT_FILE"
    log "Temp root: $TMP_ROOT"
    syntax_tests
    scheduler_e2e_tests
    pending_restore_tests
    log "\n=== Summary ==="
    log "PASS=$PASS"
    log "FAIL=$FAIL"
    log "SKIP=$SKIP"
    if [[ "$FAIL" -ne 0 ]]; then
        log "RESULT=FAILED"
        exit 1
    fi
    log "RESULT=OK"
    exit 0
}
# Entry point.
main "$@"
-204
View File
@@ -1,204 +0,0 @@
#!/bin/bash
# ==========================================================
# ProxMenux - A menu-driven script for Proxmox VE management
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : (GPL-3.0) (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : 1.1
# Last Updated: 17/08/2025
# ==========================================================
# Description:
# This script automates the process of enabling and configuring Intel Integrated GPU (iGPU) support in Proxmox VE LXC containers.
# Its goal is to simplify the configuration of hardware-accelerated graphical capabilities within containers, allowing for efficient
# use of Intel iGPUs for tasks such as transcoding, rendering, and accelerating graphics-intensive applications.
# ==========================================================
# Configuration ============================================
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
VENV_PATH="/opt/googletrans-env"
# Pull in shared ProxMenux helpers (translate, msg_*, whiptail wrappers)
# when installed.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
# Provided by utils.sh: initialize i18n and the translation cache.
load_language
initialize_cache
# ==========================================================
select_container() {
# Offer all LXC containers in a whiptail menu and store the choice in
# the global CONTAINER_ID; exits on empty list or cancelled dialog.
CONTAINERS=$(pct list | awk 'NR>1 {print $1, $3}' | xargs -n2)
if [ -z "$CONTAINERS" ]; then
msg_error "$(translate 'No containers available in Proxmox.')"
exit 1
fi
# $CONTAINERS is intentionally unquoted: whiptail needs every ID/name
# token as a separate menu argument.
CONTAINER_ID=$(whiptail --title "$(translate 'Select Container')" \
--menu "$(translate 'Select the LXC container:')" 20 70 10 $CONTAINERS 3>&1 1>&2 2>&3)
if [ -z "$CONTAINER_ID" ]; then
msg_error "$(translate 'No container selected. Exiting.')"
exit 1
fi
# Defensive re-check that the chosen ID actually exists.
if ! pct list | awk 'NR>1 {print $1}' | grep -qw "$CONTAINER_ID"; then
msg_error "$(translate 'Container with ID') $CONTAINER_ID $(translate 'does not exist. Exiting.')"
exit 1
fi
msg_ok "$(translate 'Container selected:') $CONTAINER_ID"
}
validate_container_id() {
    # Ensure a container was selected, then stop it if running: the
    # config changes below must be applied while the CT is down.
    [[ -n "$CONTAINER_ID" ]] || {
        msg_error "$(translate 'Container ID not defined. Make sure to select a container first.')"
        exit 1
    }
    if pct status "$CONTAINER_ID" | grep -q "running"; then
        msg_info "$(translate 'Stopping the container before applying configuration...')"
        pct stop "$CONTAINER_ID"
        msg_ok "$(translate 'Container stopped.')"
    fi
}
configure_lxc_for_igpu() {
# Rewrite /etc/pve/lxc/<CTID>.conf so the container can use the host's
# Intel iGPU. Privileged CTs get cgroup2 allows + bind mounts; unprivileged
# CTs get Proxmox devN: passthrough entries with video/render GIDs.
validate_container_id
CONFIG_FILE="/etc/pve/lxc/${CONTAINER_ID}.conf"
[[ -f "$CONFIG_FILE" ]] || { msg_error "$(translate 'Configuration file for container') $CONTAINER_ID $(translate 'not found.')"; exit 1; }
# Load the i915 driver if /dev/dri is absent; wait up to 5 s for udev.
if [[ ! -d /dev/dri ]]; then
modprobe i915 2>/dev/null || true
for _ in {1..5}; do
[[ -d /dev/dri ]] && break
sleep 1
done
fi
# CT_TYPE: "1" = unprivileged, "0" = privileged (default when missing).
CT_TYPE=$(pct config "$CONTAINER_ID" | awk '/^unprivileged:/ {print $2}')
[[ -z "$CT_TYPE" ]] && CT_TYPE="0"
msg_info "$(translate 'Configuring Intel iGPU passthrough for container...')"
# Make host render nodes group-accessible (group "render").
for rn in /dev/dri/renderD*; do
[[ -e "$rn" ]] || continue
chmod 660 "$rn" 2>/dev/null || true
chgrp render "$rn" 2>/dev/null || true
done
# Collect the character devices to pass through.
mapfile -t RENDER_NODES < <(find /dev/dri -maxdepth 1 -type c -name 'renderD*' 2>/dev/null || true)
mapfile -t CARD_NODES < <(find /dev/dri -maxdepth 1 -type c -name 'card*' 2>/dev/null || true)
FB_NODE=""
[[ -e /dev/fb0 ]] && FB_NODE="/dev/fb0"
if [[ ${#RENDER_NODES[@]} -eq 0 && ${#CARD_NODES[@]} -eq 0 && -z "$FB_NODE" ]]; then
msg_warn "$(translate 'No VA-API devices found on host (/dev/dri*, /dev/fb0). Is i915 loaded?')"
return 0
fi
# Ensure nesting=1 is present on the features: line (append to an
# existing line, or create one).
if grep -q '^features:' "$CONFIG_FILE"; then
grep -Eq '^features:.*(^|,)\s*nesting=1(\s|,|$)' "$CONFIG_FILE" || sed -i 's/^features:\s*/&nesting=1, /' "$CONFIG_FILE"
else
echo "features: nesting=1" >> "$CONFIG_FILE"
fi
if [[ "$CT_TYPE" == "0" ]]; then
# Privileged CT: drop any previous GPU lines, then re-add cgroup
# allow for DRM major 226 and bind mounts for /dev/dri (and fb0).
sed -i '/^lxc\.cgroup2\.devices\.allow:\s*c\s*226:/d' "$CONFIG_FILE"
sed -i '\|^lxc\.mount\.entry:\s*/dev/dri|d' "$CONFIG_FILE"
sed -i '\|^lxc\.mount\.entry:\s*/dev/fb0|d' "$CONFIG_FILE"
echo "lxc.cgroup2.devices.allow: c 226:* rwm" >> "$CONFIG_FILE"
echo "lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir" >> "$CONFIG_FILE"
[[ -n "$FB_NODE" ]] && echo "lxc.mount.entry: /dev/fb0 dev/fb0 none bind,optional,create=file" >> "$CONFIG_FILE"
else
# Unprivileged CT: replace all devN: entries with one per node.
# gid=44 (video) for card*, gid=104 (render) for renderD*.
sed -i '/^dev[0-9]\+:/d' "$CONFIG_FILE"
idx=0
for c in "${CARD_NODES[@]}"; do
echo "dev${idx}: $c,gid=44" >> "$CONFIG_FILE"
idx=$((idx+1))
done
for r in "${RENDER_NODES[@]}"; do
echo "dev${idx}: $r,gid=104" >> "$CONFIG_FILE"
idx=$((idx+1))
done
fi
msg_ok "$(translate 'iGPU configuration added to container') $CONTAINER_ID."
}
install_igpu_in_container() {
# Start the CT and install the VA-API / OpenCL user-space stack inside
# it. Runs under script(1) so whiptail/terminal output can be unwound
# (tput sc/rc) on success.
msg_info2 "$(translate 'Installing iGPU drivers inside the container...')"
tput sc
LOG_FILE=$(mktemp)
pct start "$CONTAINER_ID" >/dev/null 2>&1
# Inside the CT: create video(44)/render(104) groups if missing, add
# root to them, and install the Intel media/compute packages.
script -q -c "pct exec \"$CONTAINER_ID\" -- bash -c '
set -e
getent group video >/dev/null || groupadd -g 44 video
getent group render >/dev/null || groupadd -g 104 render
usermod -aG video,render root || true
apt-get update >/dev/null 2>&1
apt-get install -y va-driver-all ocl-icd-libopencl1 intel-opencl-icd vainfo intel-gpu-tools
chgrp video /dev/dri 2>/dev/null || true
chmod 755 /dev/dri 2>/dev/null || true
'" "$LOG_FILE"
# NOTE(review): $? here is script(1)'s exit status — presumably the
# inner pct exec status propagates through it; verify on the target
# util-linux version.
if [ $? -eq 0 ]; then
tput rc
tput ed
rm -f "$LOG_FILE"
msg_ok "$(translate 'iGPU drivers installed inside the container.')"
else
tput rc
tput ed
msg_error "$(translate 'Failed to install iGPU drivers inside the container.')"
cat "$LOG_FILE"
rm -f "$LOG_FILE"
exit 1
fi
}
# --- Main flow: pick CT, configure host config, install drivers. -----
select_container
show_proxmenux_logo
msg_title "$(translate "Add HW iGPU acceleration to an LXC")"
configure_lxc_for_igpu
install_igpu_in_container
msg_success "$(translate 'iGPU configuration completed in container') $CONTAINER_ID."
echo -e
msg_success "$(translate "Press Enter to return to menu...")"
read -r
-368
View File
@@ -1,368 +0,0 @@
#!/bin/bash
# ==========================================================
# ProxMenux - A menu-driven script for Proxmox VE management
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : (GPL-3.0) (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : 1.0
# Last Updated: 28/01/2025
# ==========================================================
# Description:
# This script allows users to assign physical disks to existing
# Proxmox virtual machines (VMs) through an interactive menu.
# - Detects the system disk and excludes it from selection.
# - Lists all available VMs for the user to choose from.
# - Identifies and displays unassigned physical disks.
# - Allows the user to select multiple disks and attach them to a VM.
# - Supports interface types: SATA, SCSI, VirtIO, and IDE.
# - Ensures that disks are not already assigned to active VMs.
# - Warns about disk sharing between multiple VMs to avoid data corruption.
# - Configures the selected disks for the VM and verifies the assignment.
#
# The goal of this script is to simplify the process of assigning
# physical disks to Proxmox VMs, reducing manual configurations
# and preventing potential errors.
# ==========================================================
# Configuration ============================================
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
VENV_PATH="/opt/googletrans-env"
# Pull in shared ProxMenux helpers (translate, msg_*, cleanup) when present.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
# Provided by utils.sh: i18n setup, cache, and banner.
load_language
initialize_cache
show_proxmenux_logo
# ==========================================================
get_disk_info() {
    # Populate the globals MODEL and SIZE for the given block device
    # (xargs trims surrounding whitespace) and echo "MODEL SIZE".
    local disk=$1
    MODEL="$(lsblk -dn -o MODEL "$disk" | xargs)"
    SIZE="$(lsblk -dn -o SIZE "$disk" | xargs)"
    echo "$MODEL" "$SIZE"
}
# Build an "ID name" list of all VMs for the selection menu.
VM_LIST=$(qm list | awk 'NR>1 {print $1, $2}')
if [ -z "$VM_LIST" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No VMs available in the system.")" 8 40
exit 1
fi
# $VM_LIST is intentionally unquoted so whiptail receives token pairs.
VMID=$(whiptail --title "$(translate "Select VM")" --menu "$(translate "Select the VM to which you want to add disks:")" 15 60 8 $VM_LIST 3>&1 1>&2 2>&3)
if [ -z "$VMID" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No VM was selected.")" 8 40
exit 1
fi
VMID=$(echo "$VMID" | tr -d '"')
msg_ok "$(translate "VM selected successfully.")"
# Disks can only be attached while the VM is powered off.
VM_STATUS=$(qm status "$VMID" | awk '{print $2}')
if [ "$VM_STATUS" == "running" ]; then
whiptail --title "$(translate "Warning")" --msgbox "$(translate "The VM is powered on. Turn it off before adding disks.")" 12 60
exit 1
fi
##########################################
msg_info "$(translate "Detecting available disks...")"
# Disks backing LVM and currently-mounted partitions are excluded later.
USED_DISKS=$(lsblk -n -o PKNAME,TYPE | grep 'lvm' | awk '{print "/dev/" $1}')
MOUNTED_DISKS=$(lsblk -ln -o NAME,MOUNTPOINT | awk '$2!="" {print "/dev/" $1}')
# Resolve every zpool member (by-id symlinks or /dev paths) back to its
# parent whole-disk device so ZFS members can be excluded.
ZFS_DISKS=""
ZFS_RAW=$(zpool list -v -H 2>/dev/null | awk '{print $1}' | grep -v '^NAME$' | grep -v '^-' | grep -v '^mirror')
for entry in $ZFS_RAW; do
path=""
if [[ "$entry" == wwn-* || "$entry" == ata-* ]]; then
if [ -e "/dev/disk/by-id/$entry" ]; then
path=$(readlink -f "/dev/disk/by-id/$entry")
fi
elif [[ "$entry" == /dev/* ]]; then
path="$entry"
fi
if [ -n "$path" ]; then
base_disk=$(lsblk -no PKNAME "$path" 2>/dev/null)
if [ -n "$base_disk" ]; then
ZFS_DISKS+="/dev/$base_disk"$'\n'
fi
fi
done
ZFS_DISKS=$(echo "$ZFS_DISKS" | sort -u)
is_disk_in_use() {
    # Return 0 (in use) when any partition of the disk carries ZFS/RAID
    # metadata or is mounted, or the disk appears in the LVM/ZFS lists
    # collected above.
    # NOTE(review): this helper does not appear to be called within
    # this script — the scan loop repeats the checks inline; verify.
    local disk="$1"
    local part fstype
    while read -r part fstype; do
        if [[ "$fstype" == "zfs_member" || "$fstype" == "linux_raid_member" ]]; then
            return 0
        fi
        if grep -q "/dev/$part" <<< "$MOUNTED_DISKS"; then
            return 0
        fi
    done < <(lsblk -ln -o NAME,FSTYPE "$disk" | tail -n +2)
    if grep -q "$disk" <<< "$USED_DISKS" || grep -q "$disk" <<< "$ZFS_DISKS"; then
        return 0
    fi
    return 1
}
FREE_DISKS=()
# Canonical device paths of all LVM physical volumes; stderr filter
# drops pvs' "File descriptor leaked" noise under whiptail.
LVM_DEVICES=$(pvs --noheadings -o pv_name 2> >(grep -v 'File descriptor .* leaked') | xargs -n1 readlink -f | sort -u)
# NOTE(review): RAID_ACTIVE appears unused below (the loop greps
# /proc/mdstat directly) — verify before relying on it.
RAID_ACTIVE=$(grep -Po 'md\d+\s*:\s*active\s+raid[0-9]+' /proc/mdstat | awk '{print $1}' | sort -u)
# Scan every whole disk (excluding loop (7) and zram-like (11) majors)
# and keep only those that are safe to offer for passthrough.
while read -r DISK; do
# Skip ZFS zvols.
[[ "$DISK" =~ /dev/zd ]] && continue
# get_disk_info echoes "MODEL SIZE"; last word is the size, the rest
# (possibly multi-word) is the model.
INFO=($(get_disk_info "$DISK"))
MODEL="${INFO[@]::${#INFO[@]}-1}"
SIZE="${INFO[-1]}"
LABEL=""
SHOW_DISK=true
IS_MOUNTED=false
IS_RAID=false
IS_ZFS=false
IS_LVM=false
# Classify by the filesystem signatures on the disk's partitions.
while read -r part fstype; do
[[ "$fstype" == "zfs_member" ]] && IS_ZFS=true
[[ "$fstype" == "linux_raid_member" ]] && IS_RAID=true
[[ "$fstype" == "LVM2_member" ]] && IS_LVM=true
if grep -q "/dev/$part" <<< "$MOUNTED_DISKS"; then
IS_MOUNTED=true
fi
done < <(lsblk -ln -o NAME,FSTYPE "$DISK" | tail -n +2)
REAL_PATH=$(readlink -f "$DISK")
# A disk that is an LVM PV is treated like a mounted disk.
if echo "$LVM_DEVICES" | grep -qFx "$REAL_PATH"; then
IS_MOUNTED=true
fi
# Mark disks already referenced (directly or via by-id symlink) in any
# VM/CT config as "In use".
USED_BY=""
REAL_PATH=$(readlink -f "$DISK")
CONFIG_DATA=$(grep -vE '^\s*#' /etc/pve/qemu-server/*.conf /etc/pve/lxc/*.conf 2>/dev/null)
if grep -Fq "$REAL_PATH" <<< "$CONFIG_DATA"; then
USED_BY="$(translate "In use")"
else
for SYMLINK in /dev/disk/by-id/*; do
if [[ "$(readlink -f "$SYMLINK")" == "$REAL_PATH" ]]; then
if grep -Fq "$SYMLINK" <<< "$CONFIG_DATA"; then
USED_BY="$(translate "In use")"
break
fi
fi
done
fi
# Hide members of an active RAID, ZFS members, mounted disks, and
# disks already attached to the selected VM.
if $IS_RAID && grep -q "$DISK" <<< "$(cat /proc/mdstat)"; then
if grep -q "active raid" /proc/mdstat; then
SHOW_DISK=false
fi
fi
if $IS_ZFS; then
SHOW_DISK=false
fi
if $IS_MOUNTED; then
SHOW_DISK=false
fi
if qm config "$VMID" | grep -vE '^\s*#|^description:' | grep -q "$DISK"; then
SHOW_DISK=false
fi
# Offer the disk, annotated with usage/metadata warnings.
if $SHOW_DISK; then
[[ -n "$USED_BY" ]] && LABEL+=" [$USED_BY]"
[[ "$IS_RAID" == true ]] && LABEL+=" ⚠ RAID"
[[ "$IS_LVM" == true ]] && LABEL+=" ⚠ LVM"
[[ "$IS_ZFS" == true ]] && LABEL+=" ⚠ ZFS"
DESCRIPTION=$(printf "%-30s %10s%s" "$MODEL" "$SIZE" "$LABEL")
FREE_DISKS+=("$DISK" "$DESCRIPTION" "OFF")
fi
done < <(lsblk -dn -e 7,11 -o PATH)
if [ "${#FREE_DISKS[@]}" -eq 0 ]; then
cleanup
whiptail --title "$(translate "Error")" --msgbox "$(translate "No disks available for this VM.")" 8 40
clear
exit 1
fi
msg_ok "$(translate "Available disks detected.")"
######################################################
# Size the checklist dialog to the longest entry (minimum 50 cols).
MAX_WIDTH=$(printf "%s\n" "${FREE_DISKS[@]}" | awk '{print length}' | sort -nr | head -n1)
TOTAL_WIDTH=$((MAX_WIDTH + 20))
if [ $TOTAL_WIDTH -lt 50 ]; then
TOTAL_WIDTH=50
fi
# Multi-select: several disks may be attached in one pass.
SELECTED=$(whiptail --title "$(translate "Select Disks")" --checklist \
"$(translate "Select the disks you want to add:")" 20 $TOTAL_WIDTH 10 "${FREE_DISKS[@]}" 3>&1 1>&2 2>&3)
if [ -z "$SELECTED" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No disks were selected.")" 10 64
clear
exit 1
fi
msg_ok "$(translate "Disks selected successfully.")"
# One interface type (sata/scsi/virtio/ide) applies to all disks.
INTERFACE=$(whiptail --title "$(translate "Interface Type")" --menu "$(translate "Select the interface type for all disks:")" 15 40 4 \
"sata" "$(translate "Add as SATA")" \
"scsi" "$(translate "Add as SCSI")" \
"virtio" "$(translate "Add as VirtIO")" \
"ide" "$(translate "Add as IDE")" 3>&1 1>&2 2>&3)
if [ -z "$INTERFACE" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No interface type was selected for the disks.")" 8 40
clear
exit 1
fi
msg_ok "$(translate "Interface type selected: $INTERFACE")"
DISKS_ADDED=0
ERROR_MESSAGES=""
SUCCESS_MESSAGES=""
msg_info "$(translate "Processing selected disks...")"
# Attach each selected disk; refuse disks used by running guests and
# warn when a disk is shared with stopped guests.
for DISK in $SELECTED; do
DISK=$(echo "$DISK" | tr -d '"')
DISK_INFO=$(get_disk_info "$DISK")
ASSIGNED_TO=""
RUNNING_VMS=""
RUNNING_CTS=""
# Collect every VM already referencing this disk, noting running ones.
while read -r VM_ID VM_NAME; do
if [[ "$VM_ID" =~ ^[0-9]+$ ]] && qm config "$VM_ID" | grep -q "$DISK"; then
ASSIGNED_TO+="VM $VM_ID $VM_NAME\n"
VM_STATUS=$(qm status "$VM_ID" | awk '{print $2}')
if [ "$VM_STATUS" == "running" ]; then
RUNNING_VMS+="VM $VM_ID $VM_NAME\n"
fi
fi
done < <(qm list | awk 'NR>1 {print $1, $2}')
# Same check for containers.
while read -r CT_ID CT_NAME; do
if [[ "$CT_ID" =~ ^[0-9]+$ ]] && pct config "$CT_ID" | grep -q "$DISK"; then
ASSIGNED_TO+="CT $CT_ID $CT_NAME\n"
CT_STATUS=$(pct status "$CT_ID" | awk '{print $2}')
if [ "$CT_STATUS" == "running" ]; then
RUNNING_CTS+="CT $CT_ID $CT_NAME\n"
fi
fi
done < <(pct list | awk 'NR>1 {print $1, $2}')
# A disk held by a running guest is never attached.
if [ -n "$RUNNING_VMS" ] || [ -n "$RUNNING_CTS" ]; then
ERROR_MESSAGES+="$(translate "The disk") $DISK_INFO $(translate "is currently in use by the following running VM(s) or CT(s):")\\n$RUNNING_VMS$RUNNING_CTS\\n\\n$(translate "You cannot add this disk while the VM or CT is running.")\\n$(translate "Please shut it down first and run this script again to add the disk.")\\n\\n"
continue
fi
# Sharing with stopped guests is allowed only after confirmation;
# declining restarts the script from scratch.
if [ -n "$ASSIGNED_TO" ]; then
cleanup
whiptail --title "$(translate "Disk Already Assigned")" --yesno "$(translate "The disk") $DISK_INFO $(translate "is already assigned to the following VM(s) or CT(s):")\\n$ASSIGNED_TO\\n\\n$(translate "Do you want to continue anyway?")" 15 70
if [ $? -ne 0 ]; then
sleep 1
exec "$0"
fi
fi
# Find the first free slot index for the chosen interface.
INDEX=0
while qm config "$VMID" | grep -q "${INTERFACE}${INDEX}"; do
((INDEX++))
done
RESULT=$(qm set "$VMID" -${INTERFACE}${INDEX} "$DISK" 2>&1)
if [ $? -eq 0 ]; then
MESSAGE="$(translate "The disk") $DISK_INFO $(translate "has been successfully added to VM") $VMID."
if [ -n "$ASSIGNED_TO" ]; then
MESSAGE+="\\n\\n$(translate "WARNING: This disk is also assigned to the following VM(s):")\\n$ASSIGNED_TO"
MESSAGE+="\\n$(translate "Make sure not to start VMs that share this disk at the same time to avoid data corruption.")"
fi
SUCCESS_MESSAGES+="$MESSAGE\\n\\n"
((DISKS_ADDED++))
else
ERROR_MESSAGES+="$(translate "Could not add disk") $DISK_INFO $(translate "to VM") $VMID.\\n$(translate "Error:") $RESULT\\n\\n"
fi
done
msg_ok "$(translate "Disk processing completed.")"
# Final report dialogs.
if [ -n "$SUCCESS_MESSAGES" ]; then
# NOTE(review): MSG_LINES is computed but not used (box height is
# fixed at 16) — verify whether it was meant to size the dialog.
MSG_LINES=$(echo "$SUCCESS_MESSAGES" | wc -l)
whiptail --title "$(translate "Successful Operations")" --msgbox "$SUCCESS_MESSAGES" 16 70
fi
if [ -n "$ERROR_MESSAGES" ]; then
whiptail --title "$(translate "Warnings and Errors")" --msgbox "$ERROR_MESSAGES" 16 70
fi
exit 0
-537
View File
@@ -1,537 +0,0 @@
#!/bin/bash
# ==========================================================
# ProxMenux - A menu-driven script for Proxmox VE management
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : (GPL-3.0) (https://github.com/MacRimi/ProxMenux/blob/main/LICENSE)
# Version : 1.0
# Last Updated: 28/01/2025
# ==========================================================
# Description:
# This script allows users to assign physical disks to existing
# Proxmox containers (CTs) through an interactive menu.
# - Detects the system disk and excludes it from selection.
# - Lists all available CTs for the user to choose from.
# - Identifies and displays unassigned physical disks.
# - Allows the user to select multiple disks and attach them to a CT.
# - Configures the selected disks for the CT and verifies the assignment.
# ==========================================================
# Configuration ============================================
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
VENV_PATH="/opt/googletrans-env"
# Pull in shared ProxMenux helpers (translate, msg_*, cleanup) when present.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
# Provided by utils.sh: i18n setup and translation cache.
load_language
initialize_cache
# ==========================================================
get_disk_info() {
    # Look up model and size for a block device; results are left in
    # the globals MODEL/SIZE and also printed as one line.
    local disk=$1
    MODEL=$(lsblk -dn -o MODEL "$disk" | xargs)
    SIZE=$(lsblk -dn -o SIZE "$disk" | xargs)
    printf '%s %s\n' "$MODEL" "$SIZE"
}
# Build an "ID name" list of all containers for the selection menu.
CT_LIST=$(pct list | awk 'NR>1 {print $1, $3}')
if [ -z "$CT_LIST" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No CTs available in the system.")" 8 40
exit 1
fi
# $CT_LIST is intentionally unquoted so whiptail receives token pairs.
CTID=$(whiptail --title "$(translate "Select CT")" --menu "$(translate "Select the CT to which you want to add disks:")" 15 60 8 $CT_LIST 3>&1 1>&2 2>&3)
if [ -z "$CTID" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No CT was selected.")" 8 40
exit 1
fi
CTID=$(echo "$CTID" | tr -d '"')
msg_ok "$(translate "CT selected successfully.")"
# Unlike the VM variant, the CT must be RUNNING for this workflow.
CT_STATUS=$(pct status "$CTID" | awk '{print $2}')
if [ "$CT_STATUS" != "running" ]; then
msg_info "$(translate "Starting CT") $CTID..."
pct start "$CTID"
sleep 2
if [ "$(pct status "$CTID" | awk '{print $2}')" != "running" ]; then
msg_error "$(translate "Failed to start the CT.")"
exit 1
fi
msg_ok "$(translate "CT started successfully.")"
fi
# Direct device passthrough needs a privileged CT; offer to convert an
# unprivileged one (stop, flip the flag in the config, restart).
CONF_FILE="/etc/pve/lxc/$CTID.conf"
if grep -q '^unprivileged: 1' "$CONF_FILE"; then
if whiptail --title "$(translate "Privileged Container")" \
--yesno "$(translate "The selected container is unprivileged. A privileged container is required for direct device passthrough.")\\n\\n$(translate "Do you want to convert it to a privileged container now?")" 12 70; then
msg_info "$(translate "Stopping container") $CTID..."
# Graceful shutdown with a 10 s poll loop.
pct shutdown "$CTID" &
for i in {1..10}; do
sleep 1
if [ "$(pct status "$CTID" | awk '{print $2}')" != "running" ]; then
break
fi
done
if [ "$(pct status "$CTID" | awk '{print $2}')" == "running" ]; then
msg_error "$(translate "Failed to stop the container.")"
exit 1
fi
msg_ok "$(translate "Container stopped.")"
# Keep a backup of the original config before editing it.
cp "$CONF_FILE" "$CONF_FILE.bak"
sed -i '/^unprivileged: 1/d' "$CONF_FILE"
echo "unprivileged: 0" >> "$CONF_FILE"
msg_ok "$(translate "Container successfully converted to privileged.")"
msg_info "$(translate "Starting container") $CTID..."
pct start "$CTID"
sleep 2
if [ "$(pct status "$CTID" | awk '{print $2}')" != "running" ]; then
msg_error "$(translate "Failed to start the container.")"
exit 1
fi
msg_ok "$(translate "Container started successfully.")"
else
whiptail --title "$(translate "Aborted")" \
--msgbox "$(translate "Operation cancelled. Cannot continue with an unprivileged container.")" 10 60
exit 1
fi
fi
##########################################
msg_info "$(translate "Detecting available disks...")"
# Disks backing LVM and currently-mounted partitions are excluded later.
USED_DISKS=$(lsblk -n -o PKNAME,TYPE | grep 'lvm' | awk '{print "/dev/" $1}')
MOUNTED_DISKS=$(lsblk -ln -o NAME,MOUNTPOINT | awk '$2!="" {print "/dev/" $1}')
# Resolve every zpool member (by-id symlinks or /dev paths) back to its
# parent whole-disk device so ZFS members can be excluded.
ZFS_DISKS=""
ZFS_RAW=$(zpool list -v -H 2>/dev/null | awk '{print $1}' | grep -v '^NAME$' | grep -v '^-' | grep -v '^mirror')
for entry in $ZFS_RAW; do
path=""
if [[ "$entry" == wwn-* || "$entry" == ata-* ]]; then
if [ -e "/dev/disk/by-id/$entry" ]; then
path=$(readlink -f "/dev/disk/by-id/$entry")
fi
elif [[ "$entry" == /dev/* ]]; then
path="$entry"
fi
if [ -n "$path" ]; then
base_disk=$(lsblk -no PKNAME "$path" 2>/dev/null)
if [ -n "$base_disk" ]; then
ZFS_DISKS+="/dev/$base_disk"$'\n'
fi
fi
done
ZFS_DISKS=$(echo "$ZFS_DISKS" | sort -u)
is_disk_in_use() {
    # Succeed (0, "in use") when any partition of the disk carries
    # ZFS/RAID metadata or is mounted, or the whole disk appears in the
    # LVM/ZFS device lists collected above.
    # NOTE(review): this helper does not appear to be called within
    # this script — the scan loop repeats the checks inline; verify.
    local disk="$1"
    local part fstype
    while read -r part fstype; do
        if [[ "$fstype" == "zfs_member" || "$fstype" == "linux_raid_member" ]]; then
            return 0
        fi
        if grep -q "/dev/$part" <<< "$MOUNTED_DISKS"; then
            return 0
        fi
    done < <(lsblk -ln -o NAME,FSTYPE "$disk" | tail -n +2)
    if grep -q "$disk" <<< "$USED_DISKS" || grep -q "$disk" <<< "$ZFS_DISKS"; then
        return 0
    fi
    return 1
}
FREE_DISKS=()
# Canonical device paths of all LVM physical volumes; stderr filter
# drops pvs' "File descriptor leaked" noise under whiptail.
LVM_DEVICES=$(pvs --noheadings -o pv_name 2> >(grep -v 'File descriptor .* leaked') | xargs -r -n1 readlink -f | sort -u)
# NOTE: a stray pre-loop `grep -qFx "$REAL_PATH"` check was removed
# here — REAL_PATH is only defined inside the scan loop below, which
# already performs the same LVM-membership test per disk; the pre-loop
# copy referenced unset variables and was dead code.
RAID_ACTIVE=$(grep -Po 'md\d+\s*:\s*active\s+raid[0-9]+' /proc/mdstat | awk '{print $1}' | sort -u)
# Scan every whole disk (excluding loop (7) and zram-like (11) majors)
# and keep only those that are safe to offer for passthrough.
while read -r DISK; do
# Skip ZFS zvols.
[[ "$DISK" =~ /dev/zd ]] && continue
# get_disk_info echoes "MODEL SIZE"; last word is the size, the rest
# (possibly multi-word) is the model.
INFO=($(get_disk_info "$DISK"))
MODEL="${INFO[@]::${#INFO[@]}-1}"
SIZE="${INFO[-1]}"
LABEL=""
SHOW_DISK=true
IS_MOUNTED=false
IS_RAID=false
IS_ZFS=false
IS_LVM=false
# Classify by the filesystem signatures on the disk's partitions.
while read -r part fstype; do
[[ "$fstype" == "zfs_member" ]] && IS_ZFS=true
[[ "$fstype" == "linux_raid_member" ]] && IS_RAID=true
[[ "$fstype" == "LVM2_member" ]] && IS_LVM=true
if grep -q "/dev/$part" <<< "$MOUNTED_DISKS"; then
IS_MOUNTED=true
fi
done < <(lsblk -ln -o NAME,FSTYPE "$DISK" | tail -n +2)
REAL_PATH=$(readlink -f "$DISK")
# A disk that is an LVM PV is treated like a mounted disk.
if echo "$LVM_DEVICES" | grep -qFx "$REAL_PATH"; then
IS_MOUNTED=true
fi
# Mark disks already referenced (directly or via by-id symlink) in any
# VM/CT config as "In use".
USED_BY=""
REAL_PATH=$(readlink -f "$DISK")
CONFIG_DATA=$(grep -vE '^\s*#' /etc/pve/qemu-server/*.conf /etc/pve/lxc/*.conf 2>/dev/null)
if grep -Fq "$REAL_PATH" <<< "$CONFIG_DATA"; then
USED_BY="$(translate "In use")"
else
for SYMLINK in /dev/disk/by-id/*; do
if [[ "$(readlink -f "$SYMLINK")" == "$REAL_PATH" ]]; then
if grep -Fq "$SYMLINK" <<< "$CONFIG_DATA"; then
USED_BY="$(translate "In use")"
break
fi
fi
done
fi
# Hide members of an active RAID, ZFS members, mounted disks, and
# disks already attached to the selected CT.
if $IS_RAID && grep -q "$DISK" <<< "$(cat /proc/mdstat)"; then
if grep -q "active raid" /proc/mdstat; then
SHOW_DISK=false
fi
fi
if $IS_ZFS; then
SHOW_DISK=false
fi
if $IS_MOUNTED; then
SHOW_DISK=false
fi
if pct config "$CTID" | grep -vE '^\s*#|^description:' | grep -q "$DISK"; then
SHOW_DISK=false
fi
# Offer the disk, annotated with usage/metadata warnings.
if $SHOW_DISK; then
[[ -n "$USED_BY" ]] && LABEL+=" [$USED_BY]"
[[ "$IS_RAID" == true ]] && LABEL+=" ⚠ RAID"
[[ "$IS_LVM" == true ]] && LABEL+=" ⚠ LVM"
[[ "$IS_ZFS" == true ]] && LABEL+=" ⚠ ZFS"
DESCRIPTION=$(printf "%-30s %10s%s" "$MODEL" "$SIZE" "$LABEL")
FREE_DISKS+=("$DISK" "$DESCRIPTION" "OFF")
fi
done < <(lsblk -dn -e 7,11 -o PATH)
if [ "${#FREE_DISKS[@]}" -eq 0 ]; then
cleanup
whiptail --title "$(translate "Error")" --msgbox "$(translate "No disks available for this CT.")" 8 40
clear
exit 1
fi
msg_ok "$(translate "Available disks detected.")"
######################################################
# Size the dialog to the longest entry (minimum 50 cols).
MAX_WIDTH=$(printf "%s\n" "${FREE_DISKS[@]}" | awk '{print length}' | sort -nr | head -n1)
TOTAL_WIDTH=$((MAX_WIDTH + 20))
if [ $TOTAL_WIDTH -lt 50 ]; then
TOTAL_WIDTH=50
fi
# Radiolist (single choice) here, unlike the multi-select VM variant.
SELECTED=$(whiptail --title "$(translate "Select Disks")" --radiolist \
"$(translate "Select the disks you want to add:")" 20 $TOTAL_WIDTH 10 "${FREE_DISKS[@]}" 3>&1 1>&2 2>&3)
if [ -z "$SELECTED" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No disks were selected.")" 10 64
clear
exit 1
fi
msg_ok "$(translate "Disks selected successfully.")"
DISKS_ADDED=0
ERROR_MESSAGES=""
SUCCESS_MESSAGES=""
msg_info "$(translate "Processing selected disks...")"
for DISK in $SELECTED; do
DISK=$(echo "$DISK" | tr -d '"')
DISK_INFO=$(get_disk_info "$DISK")
ASSIGNED_TO=""
RUNNING_CTS=""
RUNNING_VMS=""
# Comprobar CTs
while read -r CT_ID CT_NAME; do
if [[ "$CT_ID" =~ ^[0-9]+$ ]] && pct config "$CT_ID" | grep -q "$DISK"; then
ASSIGNED_TO+="CT $CT_ID $CT_NAME\n"
CT_STATUS=$(pct status "$CT_ID" | awk '{print $2}')
if [ "$CT_STATUS" == "running" ]; then
RUNNING_CTS+="CT $CT_ID $CT_NAME\n"
fi
fi
done < <(pct list | awk 'NR>1 {print $1, $3}')
# Comprobar VMs
while read -r VM_ID VM_NAME; do
if [[ "$VM_ID" =~ ^[0-9]+$ ]] && qm config "$VM_ID" | grep -q "$DISK"; then
ASSIGNED_TO+="VM $VM_ID $VM_NAME\n"
VM_STATUS=$(qm status "$VM_ID" | awk '{print $2}')
if [ "$VM_STATUS" == "running" ]; then
RUNNING_VMS+="VM $VM_ID $VM_NAME\n"
fi
fi
done < <(qm list | awk 'NR>1 {print $1, $2}')
if [ -n "$RUNNING_CTS" ] || [ -n "$RUNNING_VMS" ]; then
ERROR_MESSAGES+="$(translate "The disk") $DISK_INFO $(translate "is in use by the following running VM(s) or CT(s):")\\n$RUNNING_CTS$RUNNING_VMS\\n\\n"
continue
fi
if [ -n "$ASSIGNED_TO" ]; then
cleanup
whiptail --title "$(translate "Disk Already Assigned")" --yesno "$(translate "The disk") $DISK_INFO $(translate "is already assigned to the following VM(s) or CT(s):")\\n$ASSIGNED_TO\\n\\n$(translate "Do you want to continue anyway?")" 15 70
if [ $? -ne 0 ]; then
sleep 1
exec "$0"
fi
fi
cleanup
if lsblk "$DISK" | grep -q "raid" || grep -q "${DISK##*/}" /proc/mdstat; then
whiptail --title "$(translate "RAID Detected")" --msgbox "$(translate "The disk") $DISK_INFO $(translate "appears to be part of a") RAID. $(translate "For security reasons, the system cannot format it.")\\n\\n$(translate "If you are sure you want to use it, please remove the") RAID metadata $(translate "or format it manually using external tools.")\\n\\n$(translate "After that, run this script again to add it.")" 18 70
exit
fi
MOUNT_POINT=$(whiptail --title "$(translate "Mount Point")" --inputbox "$(translate "Enter the mount point for the disk (e.g., /mnt/disk_passthrough):")" 10 60 "/mnt/disk_passthrough" 3>&1 1>&2 2>&3)
if [ -z "$MOUNT_POINT" ]; then
whiptail --title "$(translate "Error")" --msgbox "$(translate "No mount point was specified.")" 8 40
continue
fi
msg_ok "$(translate "Mount point specified: $MOUNT_POINT")"
PARTITION=$(lsblk -rno NAME "$DISK" | awk -v disk="$(basename "$DISK")" '$1 != disk {print $1; exit}')
SKIP_FORMAT=false
if [ -n "$PARTITION" ]; then
PARTITION="/dev/$PARTITION"
CURRENT_FS=$(lsblk -no FSTYPE "$PARTITION" | xargs)
if [[ "$CURRENT_FS" == "ext4" || "$CURRENT_FS" == "xfs" || "$CURRENT_FS" == "btrfs" ]]; then
SKIP_FORMAT=true
msg_ok "$(translate "Detected existing filesystem") $CURRENT_FS $(translate "on") $PARTITION."
else
whiptail --title "$(translate "Unsupported Filesystem")" --yesno "$(translate "The partition") $PARTITION $(translate "has an unsupported filesystem ($CURRENT_FS).\\nDo you want to format it?")" 10 70
if [ $? -ne 0 ]; then
continue
fi
fi
else
CURRENT_FS=$(lsblk -no FSTYPE "$DISK" | xargs)
if [[ "$CURRENT_FS" == "ext4" || "$CURRENT_FS" == "xfs" || "$CURRENT_FS" == "btrfs" ]]; then
SKIP_FORMAT=true
PARTITION="$DISK"
msg_ok "$(translate "Detected filesystem") $CURRENT_FS $(translate "directly on disk") $DISK.)"
else
whiptail --title "$(translate "No Valid Partitions")" --yesno "$(translate "The disk has no partitions and no valid filesystem. Do you want to create a new partition and format it?")" 10 70
if [ $? -ne 0 ]; then
continue
fi
echo -e "$(translate "Creating partition table and partition...")"
parted -s "$DISK" mklabel gpt
parted -s "$DISK" mkpart primary 0% 100%
sleep 2
partprobe "$DISK"
sleep 2
PARTITION=$(lsblk -rno NAME "$DISK" | awk -v disk="$(basename "$DISK")" '$1 != disk {print $1; exit}')
if [ -n "$PARTITION" ]; then
PARTITION="/dev/$PARTITION"
else
whiptail --title "$(translate "Partition Error")" --msgbox "$(translate "Failed to create partition on disk") $DISK_INFO." 8 70
continue
fi
fi
fi
if [ "$SKIP_FORMAT" != true ]; then
CURRENT_FS=$(lsblk -no FSTYPE "$PARTITION" | xargs)
if [[ "$CURRENT_FS" == "ext4" || "$CURRENT_FS" == "xfs" || "$CURRENT_FS" == "btrfs" ]]; then
SKIP_FORMAT=true
msg_ok "$(translate "Detected existing filesystem") $CURRENT_FS $(translate "on") $PARTITION. $(translate "Skipping format.")"
else
FORMAT_TYPE=$(whiptail --title "$(translate "Select Format Type")" --menu "$(translate "Select the filesystem type for") $DISK_INFO:" 15 60 6 \
"ext4" "$(translate "Extended Filesystem 4 (recommended)")" \
"xfs" "$(translate "XFS Filesystem")" \
"btrfs" "$(translate "Btrfs Filesystem")" 3>&1 1>&2 2>&3)
if [ -z "$FORMAT_TYPE" ]; then
whiptail --title "$(translate "Format Cancelled")" --msgbox "$(translate "Format operation cancelled. The disk will not be added.")" 8 60
continue
fi
whiptail --title "$(translate "WARNING")" --yesno "$(translate "WARNING: This operation will FORMAT the disk") $DISK_INFO $(translate "with") $FORMAT_TYPE.\\n\\n$(translate "ALL DATA ON THIS DISK WILL BE PERMANENTLY LOST!")\\n\\n$(translate "Are you sure you want to continue")" 15 70
if [ $? -ne 0 ]; then
whiptail --title "$(translate "Format Cancelled")" --msgbox "$(translate "Format operation cancelled. The disk will not be added.")" 8 60
continue
fi
fi
fi
if [ "$SKIP_FORMAT" != true ]; then
echo -e "$(translate "Formatting partition") $PARTITION $(translate "with") $FORMAT_TYPE..."
case "$FORMAT_TYPE" in
"ext4") mkfs.ext4 -F "$PARTITION" ;;
"xfs") mkfs.xfs -f "$PARTITION" ;;
"btrfs") mkfs.btrfs -f "$PARTITION" ;;
esac
if [ $? -ne 0 ]; then
whiptail --title "$(translate "Format Failed")" --msgbox "$(translate "Failed to format partition") $PARTITION $(translate "with") $FORMAT_TYPE.\\n\\n$(translate "The disk may be in use by the system or have hardware issues.")" 12 70
continue
else
msg_ok "$(translate "Partition") $PARTITION $(translate "successfully formatted with") $FORMAT_TYPE."
partprobe "$DISK"
sleep 2
fi
fi
INDEX=0
while pct config "$CTID" | grep -q "mp${INDEX}:"; do
((INDEX++))
done
##############################################################################
RESULT=$(pct set "$CTID" -mp${INDEX} "$PARTITION,mp=$MOUNT_POINT,backup=0,ro=0,acl=1" 2>&1)
pct exec "$CTID" -- chmod -R 775 "$MOUNT_POINT"
##############################################################################
if [ $? -eq 0 ]; then
MESSAGE="$(translate "The disk") $DISK_INFO $(translate "has been successfully added to CT") $CTID $(translate "as a mount point at") $MOUNT_POINT."
if [ -n "$ASSIGNED_TO" ]; then
MESSAGE+="\\n\\n$(translate "WARNING: This disk is also assigned to the following CT(s):")\\n$ASSIGNED_TO"
MESSAGE+="\\n$(translate "Make sure not to start CTs that share this disk at the same time to avoid data corruption.")"
fi
SUCCESS_MESSAGES+="$MESSAGE\\n\\n"
((DISKS_ADDED++))
else
ERROR_MESSAGES+="$(translate "Could not add disk") $DISK_INFO $(translate "to CT") $CTID.\\n$(translate "Error:") $RESULT\\n\\n"
fi
done
# ---- Final summary ---------------------------------------------------------
# Presents the accumulated per-disk results (collected inside the loop above)
# in two whiptail dialogs: successes first, then warnings/errors.
msg_ok "$(translate "Disk processing completed.")"
if [ -n "$SUCCESS_MESSAGES" ]; then
# NOTE(review): MSG_LINES is computed but never used — candidate for removal
# or for sizing the msgbox dynamically instead of the fixed 16x70.
MSG_LINES=$(echo "$SUCCESS_MESSAGES" | wc -l)
whiptail --title "$(translate "Successful Operations")" --msgbox "$SUCCESS_MESSAGES" 16 70
fi
# Warnings/errors (disk busy, pct set failures, ...) shown in a second dialog.
if [ -n "$ERROR_MESSAGES" ]; then
whiptail --title "$(translate "Warnings and Errors")" --msgbox "$ERROR_MESSAGES" 16 70
fi
exit 0
+385
View File
@@ -0,0 +1,385 @@
#!/usr/bin/env bash
# ==========================================================
# ProxMenux - Disk Operations Helpers
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : MIT
# Version : 1.0
# Last Updated: 11/04/2026
# ==========================================================
# Shared low-level disk operations: wipe, partition, format.
# Consumed by format-disk.sh, disk_host.sh and future scripts.
#
# Output variables (set by helpers, read by callers):
# DOH_CREATED_PARTITION — partition path set by doh_create_partition()
# DOH_PARTITION_ERROR_DETAIL — error detail set by doh_create_partition()
# ==========================================================
# Re-inclusion guard. Fix: use ${VAR:-} so sourcing this file does not abort
# with "unbound variable" when the caller runs under `set -u` (scripts in this
# project use `set -euo pipefail`).
if [[ -n "${__PROXMENUX_DISK_OPS_HELPERS__:-}" ]]; then
return 0
fi
__PROXMENUX_DISK_OPS_HELPERS__=1
# shellcheck disable=SC2034 # these are output variables read by callers (format-disk.sh, disk_host.sh)
DOH_CREATED_PARTITION=""        # partition path set by doh_create_partition()
DOH_PARTITION_ERROR_DETAIL=""   # diagnostics set by doh_create_partition()
DOH_FORMAT_ERROR_DETAIL=""      # diagnostics set by doh_format_partition()
DOH_WIPE_ERROR_DETAIL=""        # cleared by doh_wipe_disk() (reserved for future detail)
# Internal: print progress lines only when explicitly enabled by caller.
# Enabled with: export DOH_SHOW_PROGRESS=1
# Internal: emit a styled progress line, but only when the caller opted in
# with `export DOH_SHOW_PROGRESS=1`. Silent no-op (status 0) otherwise.
_doh_progress() {
    if [[ "${DOH_SHOW_PROGRESS:-0}" != "1" ]]; then
        return 0
    fi
    echo -e "${TAB}${YW}${HOLD}$*${CL}"
}
# Internal: collect command stdout with timeout protection (best-effort).
# Usage: _doh_collect_cmd <seconds> <cmd> [args...]
# Internal: capture a command's stdout, bounded by a timeout when timeout(1)
# exists (SIGKILL 2s after the deadline). Never fails: always exits 0 so it
# is safe inside process substitutions feeding `while read` loops.
# Usage: _doh_collect_cmd <seconds> <cmd> [args...]
_doh_collect_cmd() {
    local deadline="$1"
    shift
    if ! command -v timeout >/dev/null 2>&1; then
        # Best effort without timeout protection.
        "$@" 2>/dev/null || true
        return 0
    fi
    timeout --kill-after=2 "${deadline}s" "$@" 2>/dev/null || true
}
# Internal: run a command with a timeout, suppressing all output including
# the bash "Killed" job notification that leaks when --kill-after re-raises
# SIGKILL. Plain SIGTERM is not enough for processes stuck in kernel D-state
# (uninterruptible I/O wait on a busy ZFS/LVM disk), so --kill-after=2 is
# needed. The notification is suppressed by temporarily redirecting the
# current shell's stderr with exec before the call and restoring it after.
# Usage: _doh_run_quick_cmd <seconds> <cmd> [args...]
# _doh_run_quick_cmd <seconds> <cmd> [args...]
# Run <cmd> fully silenced with a hard deadline; returns the command's (or
# timeout's) exit status. See the comment block above for why stderr of the
# *current shell* is temporarily redirected: it hides the asynchronous
# "Killed" job notification emitted when --kill-after re-raises SIGKILL.
_doh_run_quick_cmd() {
local seconds="$1"
shift
if command -v timeout >/dev/null 2>&1; then
# Duplicate the shell's stderr into a fresh fd, then silence fd 2.
local _saved_stderr
exec {_saved_stderr}>&2 2>/dev/null
timeout --kill-after=2 "${seconds}s" "$@" >/dev/null 2>&1
local rc=$?
# Restore the original stderr and close the temporary fd.
exec 2>&"${_saved_stderr}" {_saved_stderr}>&-
return $rc
fi
# No timeout(1): best effort, unbounded.
"$@" >/dev/null 2>&1
}
# Internal: unmount all ZFS datasets then export (or destroy) any ZFS pools
# whose vdevs live on <disk>. Called at the very start of doh_wipe_disk so
# ZFS fully releases the device before wipefs/sgdisk/partprobe touch it.
# If the pool is still held after export, processes on it will be in D-state
# and --kill-after in _doh_run_quick_cmd handles the force-kill.
# _doh_release_zfs_pools <disk>
# For every imported zpool, resolve each vdev back to its parent block device
# and, when it matches <disk>, unmount the pool's datasets (deepest first)
# and force-export the pool so the kernel releases the device.
# All zpool/zfs invocations are timeout-bounded; the function never fails.
_doh_release_zfs_pools() {
local disk="$1"
command -v zpool >/dev/null 2>&1 || return 0
local pool_name dev resolved base parent
while read -r pool_name; do
[[ -z "$pool_name" ]] && continue
local found=false
# Walk the pool's vdev list; entries may be absolute paths, by-id names,
# or bare device names — normalize each to a real /dev path.
while read -r dev; do
[[ -z "$dev" ]] && continue
if [[ "$dev" == /dev/* ]]; then
resolved=$(readlink -f "$dev" 2>/dev/null)
elif [[ -e "/dev/disk/by-id/$dev" ]]; then
resolved=$(readlink -f "/dev/disk/by-id/$dev" 2>/dev/null)
elif [[ -e "/dev/$dev" ]]; then
resolved=$(readlink -f "/dev/$dev" 2>/dev/null)
else
continue
fi
[[ -z "$resolved" ]] && continue
# Map a partition (e.g. /dev/sda1) to its parent disk via PKNAME.
base=$(lsblk -no PKNAME "$resolved" 2>/dev/null)
parent="${base:+/dev/$base}"
[[ -z "$parent" ]] && parent="$resolved"
if [[ "$parent" == "$disk" || "$resolved" == "$disk" ]]; then
found=true; break
fi
# The awk/grep pipeline strips vdev-group rows (mirror/raidz/'-') and
# the pool-name row itself, leaving only leaf device entries.
done < <(_doh_collect_cmd 12 zpool list -v -H "$pool_name" | awk '{print $1}' | \
grep -v '^-' | grep -v '^mirror' | grep -v '^raidz' | \
grep -v "^${pool_name}$")
if $found; then
_doh_progress "- Releasing active ZFS pool: $pool_name"
# Unmount all datasets (reverse order: deepest first)
if command -v zfs >/dev/null 2>&1; then
# NOTE(review): loop variable 'ds' is not declared local here.
while read -r ds; do
[[ -z "$ds" ]] && continue
timeout 10s zfs unmount -f "$ds" >/dev/null 2>&1 || true
done < <(_doh_collect_cmd 10 zfs list -H -o name -r "$pool_name" | sort -r)
fi
# Export the pool so the kernel releases the block device
timeout 30s zpool export -f "$pool_name" >/dev/null 2>&1 || true
# Wait for udev to finish processing the device release
udevadm settle --timeout=5 >/dev/null 2>&1 || true
sleep 1
fi
done < <(_doh_collect_cmd 8 zpool list -H -o name)
}
# Internal: run a partitioning command with timeout, appending combined output to a file.
# Usage: _doh_part_cmd <seconds> <outfile> <cmd> [args...]
# Internal: run a partitioning command under a timeout (SIGKILL 3s past the
# deadline when timeout(1) exists), appending combined stdout+stderr to a log
# file. Propagates the command's exit status.
# Usage: _doh_part_cmd <seconds> <outfile> <cmd> [args...]
_doh_part_cmd() {
    local deadline="$1"
    local log_file="$2"
    shift 2
    if command -v timeout >/dev/null 2>&1; then
        timeout --kill-after=3 "${deadline}s" "$@" >>"$log_file" 2>&1
        return $?
    fi
    "$@" >>"$log_file" 2>&1
}
# doh_wipe_disk <disk>
# Unmounts all partitions, deactivates swap, wipes all filesystem metadata
# and partition tables (wipefs + sgdisk + dd first/last 16 MiB).
# Never fails — all sub-commands run with "|| true".
# doh_wipe_disk <disk>
# Destructive: makes <disk> look factory-blank. Optionally releases ZFS/LVM
# stacks, unmounts partitions, disables swap, wipes FS/RAID signatures, zaps
# the GPT, TRIMs when supported, and zeroes both metadata regions (first
# 16 MiB and the backup GPT at the end). Every step is best-effort/timeout-
# bounded, so the function itself never fails.
doh_wipe_disk() {
local disk="$1"
local node mountpoint total_sectors seek_sectors discard_max base
DOH_WIPE_ERROR_DETAIL=""
_doh_progress "[1/8] Preparing disk $disk"
# Optional heavy release flow (disabled by default to avoid hangs in busy hosts).
if [[ "${DOH_ENABLE_STACK_RELEASE:-0}" == "1" ]]; then
# Release any ZFS pools using this disk so the kernel lets go of it
_doh_release_zfs_pools "$disk"
# Deactivate any LVM VGs backed by this disk
if command -v vgchange >/dev/null 2>&1; then
local pv rp vg
while read -r pv; do
rp=$(readlink -f "$pv" 2>/dev/null)
# PKNAME maps a PV partition to its parent disk for comparison.
base=$(lsblk -no PKNAME "${rp:-$pv}" 2>/dev/null)
if [[ "/dev/${base}" == "$disk" || "$rp" == "$disk" ]]; then
vg=$(_doh_collect_cmd 8 pvs --noheadings -o vg_name "${rp:-$pv}" | xargs)
[[ -n "$vg" ]] && _doh_run_quick_cmd 8 vgchange -an "$vg" || true
fi
done < <(_doh_collect_cmd 8 pvs --noheadings -o pv_name | xargs -r -n1)
fi
fi
# Unmount all partitions
_doh_progress "[2/8] Unmounting partitions"
# lsblk -l lists the disk first; NR>1 skips it and keeps only child nodes
# that have a non-empty mountpoint.
while read -r node mountpoint; do
[[ -z "$node" || -z "$mountpoint" ]] && continue
_doh_run_quick_cmd 8 umount -f "$node" || true
done < <(lsblk -lnpo NAME,MOUNTPOINT "$disk" 2>/dev/null | awk 'NR>1 && $2!="" {print $1" "$2}')
# Deactivate swap
_doh_progress "[3/8] Disabling swap signatures"
while read -r node; do
[[ -z "$node" ]] && continue
_doh_run_quick_cmd 8 swapoff "$node" || true
done < <(lsblk -lnpo NAME "$disk" 2>/dev/null | awk 'NR>1 {print $1}')
# Wipe filesystem signatures and RAID superblocks on every node
_doh_progress "[4/8] Removing filesystem/RAID signatures"
# Note: this pass includes the disk itself (no NR>1), by design.
while read -r node; do
[[ -z "$node" ]] && continue
_doh_run_quick_cmd 10 wipefs -a -f "$node" || true
if command -v mdadm >/dev/null 2>&1; then
_doh_run_quick_cmd 8 mdadm --zero-superblock --force "$node" || true
fi
done < <(lsblk -lnpo NAME "$disk" 2>/dev/null)
# Zap partition table
_doh_progress "[5/8] Resetting partition table"
_doh_run_quick_cmd 12 sgdisk --zap-all "$disk" || true
# TRIM/discard if device supports it
_doh_progress "[6/8] Attempting discard/TRIM when supported"
# DISC-MAX of 0/0B means the device advertises no discard capability.
discard_max=$(lsblk -dn -o DISC-MAX "$disk" 2>/dev/null | xargs)
if [[ -n "$discard_max" && "$discard_max" != "0B" && "$discard_max" != "0" ]]; then
_doh_run_quick_cmd 15 blkdiscard -f "$disk" || true
fi
# Zero first 16 MiB (destroys partition table / filesystem headers)
_doh_progress "[7/8] Zeroing first metadata region"
_doh_run_quick_cmd 20 dd if=/dev/zero of="$disk" bs=1M count=16 conv=fsync status=none || true
# Zero last 16 MiB (destroys backup GPT header)
_doh_progress "[8/8] Zeroing backup GPT region"
# 32768 sectors x 512 B = 16 MiB; only if the disk is bigger than that.
total_sectors=$(blockdev --getsz "$disk" 2>/dev/null || echo 0)
if [[ "$total_sectors" =~ ^[0-9]+$ ]] && (( total_sectors > 32768 )); then
seek_sectors=$(( total_sectors - 32768 ))
_doh_run_quick_cmd 20 dd if=/dev/zero of="$disk" bs=512 seek="$seek_sectors" count=32768 conv=fsync status=none || true
fi
# Let udev settle and the kernel re-read the (now empty) partition table.
udevadm settle --timeout=10 >/dev/null 2>&1 || true
_doh_run_quick_cmd 8 partprobe "$disk" || true
sleep 1
}
# doh_create_partition <disk>
# Creates a single GPT partition spanning the whole disk.
# Tries parted → sgdisk → sfdisk in order; stops at first success.
#
# On success: sets DOH_CREATED_PARTITION to the new partition path, returns 0.
# On failure: sets DOH_PARTITION_ERROR_DETAIL with tool diagnostics, returns 1.
# doh_create_partition <disk>
# Creates one GPT partition spanning the whole disk, trying parted, then
# sgdisk, then sfdisk; stops at the first tool that succeeds. On success
# DOH_CREATED_PARTITION holds the partition path and the function returns 0;
# on failure DOH_PARTITION_ERROR_DETAIL accumulates per-tool diagnostics
# and the function returns 1.
doh_create_partition() {
local disk="$1"
local created=false tmp_out err_snippet
DOH_CREATED_PARTITION=""
DOH_PARTITION_ERROR_DETAIL=""
# Clear a possible read-only flag left by a previous stack (best effort).
_doh_run_quick_cmd 5 blockdev --setrw "$disk" || true
# --- attempt 1: parted ---
if command -v parted >/dev/null 2>&1; then
tmp_out=$(mktemp)
if _doh_part_cmd 15 "$tmp_out" parted -s -f "$disk" mklabel gpt; then
# 1MiB start offset keeps the partition 1 MiB-aligned.
if _doh_part_cmd 20 "$tmp_out" parted -s -f "$disk" mkpart primary 1MiB 100%; then
created=true
else
# Collapse tool output to a single line for the error variable.
err_snippet=$(tr '\n' ' ' <"$tmp_out" | sed -E 's/[[:space:]]+/ /g; s/^ //; s/ $//')
DOH_PARTITION_ERROR_DETAIL+="parted mkpart: ${err_snippet:-no details}"$'\n'
fi
else
err_snippet=$(tr '\n' ' ' <"$tmp_out" | sed -E 's/[[:space:]]+/ /g; s/^ //; s/ $//')
DOH_PARTITION_ERROR_DETAIL+="parted mklabel: ${err_snippet:-no details}"$'\n'
fi
rm -f "$tmp_out"
else
DOH_PARTITION_ERROR_DETAIL+="parted command not found"$'\n'
fi
# --- attempt 2: sgdisk ---
if [[ "$created" != "true" ]] && command -v sgdisk >/dev/null 2>&1; then
tmp_out=$(mktemp)
_doh_run_quick_cmd 10 sgdisk --zap-all "$disk" || true
# sgdisk does not accept "1MiB" notation — use sector 2048 (= 1 MiB at 512 B/sector)
if _doh_part_cmd 20 "$tmp_out" sgdisk -o -n 1:2048:0 -t 1:8300 "$disk"; then
created=true
else
err_snippet=$(tr '\n' ' ' <"$tmp_out" | sed -E 's/[[:space:]]+/ /g; s/^ //; s/ $//')
DOH_PARTITION_ERROR_DETAIL+="sgdisk create: ${err_snippet:-no details}"$'\n'
fi
rm -f "$tmp_out"
elif [[ "$created" != "true" ]]; then
DOH_PARTITION_ERROR_DETAIL+="sgdisk command not found"$'\n'
fi
# --- attempt 3: sfdisk ---
if [[ "$created" != "true" ]] && command -v sfdisk >/dev/null 2>&1; then
tmp_out=$(mktemp)
local sfdisk_ok=1
# sfdisk reads the layout from stdin: GPT label, one default partition.
if command -v timeout >/dev/null 2>&1; then
printf 'label: gpt\n,;\n' | timeout --kill-after=3 20s sfdisk --wipe always "$disk" >>"$tmp_out" 2>&1
sfdisk_ok=$?
else
printf 'label: gpt\n,;\n' | sfdisk --wipe always "$disk" >>"$tmp_out" 2>&1
sfdisk_ok=$?
fi
if [[ $sfdisk_ok -eq 0 ]]; then
created=true
else
err_snippet=$(tr '\n' ' ' <"$tmp_out" | sed -E 's/[[:space:]]+/ /g; s/^ //; s/ $//')
DOH_PARTITION_ERROR_DETAIL+="sfdisk create: ${err_snippet:-no details}"$'\n'
fi
rm -f "$tmp_out"
elif [[ "$created" != "true" ]]; then
DOH_PARTITION_ERROR_DETAIL+="sfdisk command not found"$'\n'
fi
[[ "$created" == "true" ]] || return 1
# Wait for the kernel to expose the new partition node
udevadm settle --timeout=10 >/dev/null 2>&1 || true
_doh_run_quick_cmd 8 partprobe "$disk" || true
local part
# Poll up to ~4.5 s (15 x 0.3 s) for the partition node to appear.
for _ in {1..15}; do
sleep 0.3
part=$(lsblk -lnpo NAME "$disk" 2>/dev/null | awk 'NR==2{print; exit}')
if [[ -n "$part" && -b "$part" ]]; then
DOH_CREATED_PARTITION="$part"
return 0
fi
done
# Fallback: derive partition name from disk path (handles NVMe p-suffix)
local fallback
if [[ "$disk" =~ [0-9]$ ]]; then
fallback="${disk}p1"
else
fallback="${disk}1"
fi
if [[ -b "$fallback" ]]; then
DOH_CREATED_PARTITION="$fallback"
return 0
fi
DOH_PARTITION_ERROR_DETAIL+="partition node not detected after table refresh"$'\n'
return 1
}
# doh_format_partition <partition> <filesystem> [label] [zfs_pool_name] [zfs_mountpoint]
#
# Formats <partition> with <filesystem>.
# label : optional FS label for ext4/xfs/btrfs (ignored for ZFS)
# zfs_pool_name : required when filesystem=zfs; defaults to label if empty
# zfs_mountpoint : ZFS pool mountpoint (default: "none" — no automatic mount)
#
# On failure: sets DOH_FORMAT_ERROR_DETAIL with tool diagnostics.
# Returns 0 on success, 1 on failure.
# doh_format_partition <partition> <filesystem> [label] [zfs_pool_name] [zfs_mountpoint]
# Formats <partition> with ext4/xfs/exfat/btrfs, or creates a ZFS pool on it.
# label          : optional FS label for ext4/xfs/btrfs (ignored for exfat/ZFS)
# zfs_pool_name  : pool name for filesystem=zfs; falls back to label, then "pool"
# zfs_mountpoint : ZFS pool mountpoint (default "none" — no automatic mount)
# Returns 0 on success; on failure returns 1 with one-line diagnostics in
# DOH_FORMAT_ERROR_DETAIL.
doh_format_partition() {
local partition="$1"
local filesystem="$2"
local label="${3:-}"
local zfs_pool="${4:-}"
local zfs_mountpoint="${5:-none}"
local tmp_out rc=1
DOH_FORMAT_ERROR_DETAIL=""
tmp_out=$(mktemp)
case "$filesystem" in
ext4)
if [[ -n "$label" ]]; then
mkfs.ext4 -F -L "$label" "$partition" >"$tmp_out" 2>&1; rc=$?
else
mkfs.ext4 -F "$partition" >"$tmp_out" 2>&1; rc=$?
fi
;;
xfs)
if [[ -n "$label" ]]; then
mkfs.xfs -f -L "$label" "$partition" >"$tmp_out" 2>&1; rc=$?
else
mkfs.xfs -f "$partition" >"$tmp_out" 2>&1; rc=$?
fi
;;
exfat)
# NOTE(review): the optional label is not applied for exFAT — confirm
# whether mkfs.exfat's label flag should be passed here.
mkfs.exfat "$partition" >"$tmp_out" 2>&1; rc=$?
;;
btrfs)
if [[ -n "$label" ]]; then
mkfs.btrfs -f -L "$label" "$partition" >"$tmp_out" 2>&1; rc=$?
else
mkfs.btrfs -f "$partition" >"$tmp_out" 2>&1; rc=$?
fi
;;
zfs)
[[ -z "$zfs_pool" ]] && zfs_pool="${label:-pool}"
# Clear any stale ZFS label so `zpool create -f` cannot be confused.
zpool labelclear -f "$partition" >/dev/null 2>&1 || true
# ashift=12 = 4 KiB sectors; lz4 + no atime + sa xattrs are the usual
# performance defaults for data pools.
zpool create -f -o ashift=12 \
-O compression=lz4 -O atime=off -O xattr=sa -O acltype=posixacl \
-m "$zfs_mountpoint" "$zfs_pool" "$partition" >"$tmp_out" 2>&1
rc=$?
;;
*)
echo "Unknown filesystem: $filesystem" >"$tmp_out"
rc=1
;;
esac
# On any failure, flatten the tool output into a single diagnostic line.
if [[ $rc -ne 0 ]]; then
DOH_FORMAT_ERROR_DETAIL=$(tr '\n' ' ' <"$tmp_out" | sed -E 's/[[:space:]]+/ /g; s/^ //; s/ $//')
fi
rm -f "$tmp_out"
return $rc
}
+263
View File
@@ -0,0 +1,263 @@
#!/usr/bin/env bash
# Re-inclusion guard. Fix: ${VAR:-} avoids "unbound variable" when this file
# is sourced by a script running under `set -u`.
if [[ -n "${__PROXMENUX_GPU_HOOK_GUARD_HELPERS__:-}" ]]; then
return 0
fi
__PROXMENUX_GPU_HOOK_GUARD_HELPERS__=1
# Hookscript location: the Proxmox storage reference used by qm/pct set, and
# the same file's absolute path on the node's filesystem.
PROXMENUX_GPU_HOOK_STORAGE_REF="local:snippets/proxmenux-gpu-guard.sh"
PROXMENUX_GPU_HOOK_ABS_PATH="/var/lib/vz/snippets/proxmenux-gpu-guard.sh"
# Print a warning via the project's msg_warn helper when it is loaded;
# otherwise fall back to a plain "[WARN]" line on stderr.
_gpu_guard_msg_warn() {
    local message="$1"
    if declare -F msg_warn >/dev/null 2>&1; then
        msg_warn "$message"
        return
    fi
    echo "[WARN] $message" >&2
}
# Print a success message via the project's msg_ok helper when it is loaded;
# otherwise fall back to a plain "[OK]" line on stdout.
_gpu_guard_msg_ok() {
    local message="$1"
    if declare -F msg_ok >/dev/null 2>&1; then
        msg_ok "$message"
        return
    fi
    echo "[OK] $message"
}
# True (0) when VM <vmid> has at least one hostpci passthrough entry.
_gpu_guard_has_vm_gpu() {
    local vmid="$1"
    local config_output
    config_output=$(qm config "$vmid" 2>/dev/null)
    grep -qE '^hostpci[0-9]+:' <<< "$config_output"
}
# True (0) when container <ctid> exposes GPU device nodes: either a devN
# entry pointing at /dev/dri, /dev/nvidia* or /dev/kfd, or an lxc.mount.entry
# bind-mounting dev/dri. Returns 1 when the config file does not exist.
_gpu_guard_has_lxc_gpu() {
    local ctid="$1"
    local conf_file="/etc/pve/lxc/${ctid}.conf"
    if [[ ! -f "$conf_file" ]]; then
        return 1
    fi
    grep -qE 'dev[0-9]+:.*(/dev/dri|/dev/nvidia|/dev/kfd)|lxc\.mount\.entry:.*dev/dri' "$conf_file" 2>/dev/null
}
# ensure_proxmenux_gpu_guard_hookscript
# (Re)writes the GPU-guard hookscript to the node's snippets directory and
# marks it executable. The heredoc below is the *generated* script: it runs
# at every guest pre-start and blocks startup when GPU passthrough
# prerequisites are not met (device missing, driver not vfio-pci, or the PCI
# slot already in use by another running VM).
ensure_proxmenux_gpu_guard_hookscript() {
# Snippets dir may not exist on a fresh node; failure is tolerated here and
# will surface via the cat below if the path is truly unusable.
mkdir -p /var/lib/vz/snippets 2>/dev/null || true
cat >"$PROXMENUX_GPU_HOOK_ABS_PATH" <<'HOOKEOF'
#!/usr/bin/env bash
set -u
arg1="${1:-}"
arg2="${2:-}"
case "$arg1" in
pre-start|post-start|pre-stop|post-stop)
phase="$arg1"
guest_id="$arg2"
;;
*)
guest_id="$arg1"
phase="$arg2"
;;
esac
[[ "$phase" == "pre-start" ]] || exit 0
vm_conf="/etc/pve/qemu-server/${guest_id}.conf"
ct_conf="/etc/pve/lxc/${guest_id}.conf"
if [[ -f "$vm_conf" ]]; then
mapfile -t hostpci_lines < <(grep -E '^hostpci[0-9]+:' "$vm_conf" 2>/dev/null || true)
[[ ${#hostpci_lines[@]} -eq 0 ]] && exit 0
# Build slot list used by this VM and block if any running VM already uses same slot.
slot_keys=()
for line in "${hostpci_lines[@]}"; do
val="${line#*: }"
[[ "$val" == *"mapping="* ]] && continue
first_field="${val%%,*}"
IFS=';' read -r -a ids <<< "$first_field"
for id in "${ids[@]}"; do
id="${id#host=}"
id="${id// /}"
[[ -z "$id" ]] && continue
if [[ "$id" =~ ^[0-9a-fA-F]{2}:[0-9a-fA-F]{2}$ ]]; then
key="${id,,}"
else
[[ "$id" =~ ^0000: ]] || id="0000:${id}"
key="${id#0000:}"
key="${key%.*}"
key="${key,,}"
fi
dup=0
for existing in "${slot_keys[@]}"; do
[[ "$existing" == "$key" ]] && dup=1 && break
done
[[ "$dup" -eq 0 ]] && slot_keys+=("$key")
done
done
if [[ ${#slot_keys[@]} -gt 0 ]]; then
conflict_details=""
for other_conf in /etc/pve/qemu-server/*.conf; do
[[ -f "$other_conf" ]] || continue
other_vmid="$(basename "$other_conf" .conf)"
[[ "$other_vmid" == "$guest_id" ]] && continue
qm status "$other_vmid" 2>/dev/null | grep -q "status: running" || continue
for key in "${slot_keys[@]}"; do
if grep -qE "^hostpci[0-9]+:.*(0000:)?${key}(\\.[0-7])?([,[:space:]]|$)" "$other_conf" 2>/dev/null; then
other_name="$(awk '/^name:/ {print $2}' "$other_conf" 2>/dev/null)"
[[ -z "$other_name" ]] && other_name="VM-${other_vmid}"
conflict_details+=$'\n'"- ${key} in use by VM ${other_vmid} (${other_name})"
break
fi
done
done
if [[ -n "$conflict_details" ]]; then
echo "ProxMenux GPU Guard: VM ${guest_id} blocked at pre-start." >&2
echo "A hostpci device slot is already in use by another running VM." >&2
printf '%s\n' "$conflict_details" >&2
echo "Stop the source VM or remove/move the shared hostpci assignment." >&2
exit 1
fi
fi
failed=0
details=""
for line in "${hostpci_lines[@]}"; do
val="${line#*: }"
[[ "$val" == *"mapping="* ]] && continue
first_field="${val%%,*}"
IFS=';' read -r -a ids <<< "$first_field"
for id in "${ids[@]}"; do
id="${id#host=}"
id="${id// /}"
[[ -z "$id" ]] && continue
# Slot-only syntax (e.g. 01:00 or 0000:01:00) is accepted by Proxmox.
if [[ "$id" =~ ^([0-9a-fA-F]{4}:)?[0-9a-fA-F]{2}:[0-9a-fA-F]{2}$ ]]; then
slot="${id,,}"
slot="${slot#0000:}"
slot_has_gpu=false
for dev in /sys/bus/pci/devices/0000:${slot}.*; do
[[ -e "$dev" ]] || continue
class_hex="$(cat "$dev/class" 2>/dev/null | sed 's/^0x//')"
[[ "${class_hex:0:2}" != "03" ]] && continue
slot_has_gpu=true
drv="$(basename "$(readlink "$dev/driver" 2>/dev/null)" 2>/dev/null)"
if [[ "$drv" != "vfio-pci" ]]; then
failed=1
details+=$'\n'"- ${dev##*/}: driver=${drv:-none}"
fi
done
# If this slot does not include a display/3D controller, it is not GPU-guarded.
[[ "$slot_has_gpu" == "true" ]] || true
continue
fi
[[ "$id" =~ ^0000: ]] || id="0000:${id}"
dev_path="/sys/bus/pci/devices/${id}"
if [[ ! -d "$dev_path" ]]; then
failed=1
details+=$'\n'"- ${id}: PCI device not found"
continue
fi
class_hex="$(cat "$dev_path/class" 2>/dev/null | sed 's/^0x//')"
# Enforce vfio only for display/3D devices (PCI class 03xx).
[[ "${class_hex:0:2}" == "03" ]] || continue
drv="$(basename "$(readlink "$dev_path/driver" 2>/dev/null)" 2>/dev/null)"
if [[ "$drv" != "vfio-pci" ]]; then
failed=1
details+=$'\n'"- ${id}: driver=${drv:-none}"
fi
done
done
if [[ "$failed" -eq 1 ]]; then
echo "ProxMenux GPU Guard: VM ${guest_id} blocked at pre-start." >&2
echo "GPU passthrough device is not ready for VM mode (vfio-pci required)." >&2
printf '%s\n' "$details" >&2
echo "Switch mode to GPU -> VM from ProxMenux: GPUs and Coral-TPU Menu." >&2
exit 1
fi
exit 0
fi
if [[ -f "$ct_conf" ]]; then
mapfile -t gpu_dev_paths < <(
{
grep -E '^dev[0-9]+:' "$ct_conf" 2>/dev/null | sed -E 's/^dev[0-9]+:[[:space:]]*([^,[:space:]]+).*/\1/'
grep -E '^lxc\.mount\.entry:' "$ct_conf" 2>/dev/null | sed -E 's/^lxc\.mount\.entry:[[:space:]]*([^[:space:]]+).*/\1/'
} | grep -E '^/dev/(dri|nvidia|kfd)' | sort -u
)
[[ ${#gpu_dev_paths[@]} -eq 0 ]] && exit 0
missing=""
for dev in "${gpu_dev_paths[@]}"; do
[[ -e "$dev" ]] || missing+=$'\n'"- ${dev} unavailable"
done
if [[ -n "$missing" ]]; then
echo "ProxMenux GPU Guard: LXC ${guest_id} blocked at pre-start." >&2
echo "Configured GPU devices are unavailable in host device nodes." >&2
printf '%s\n' "$missing" >&2
echo "Switch mode to GPU -> LXC from ProxMenux: GPUs and Coral-TPU Menu." >&2
exit 1
fi
exit 0
fi
exit 0
HOOKEOF
# Hookscripts must be executable; failure tolerated (e.g. read-only FS).
chmod 755 "$PROXMENUX_GPU_HOOK_ABS_PATH" 2>/dev/null || true
}
# Attach the guard hookscript to VM <vmid>, but only when the VM actually
# uses PCI passthrough and does not already carry the hook. Warns (does not
# fail) when qm set rejects the snippet reference.
attach_proxmenux_gpu_guard_to_vm() {
    local vmid="$1"
    _gpu_guard_has_vm_gpu "$vmid" || return 0
    local existing_hook
    existing_hook=$(qm config "$vmid" 2>/dev/null | awk '/^hookscript:/ {print $2}')
    # Already attached — nothing to do.
    if [[ "$existing_hook" == "$PROXMENUX_GPU_HOOK_STORAGE_REF" ]]; then
        return 0
    fi
    if qm set "$vmid" --hookscript "$PROXMENUX_GPU_HOOK_STORAGE_REF" >/dev/null 2>&1; then
        _gpu_guard_msg_ok "PCIe passthrough guard attached to VM ${vmid}"
    else
        _gpu_guard_msg_warn "Could not attach PCIe passthrough guard to VM ${vmid}. Ensure 'local' storage supports snippets."
    fi
}
# Attach the guard hookscript to container <ctid>, but only when the CT
# actually exposes GPU device nodes and does not already carry the hook.
# Warns (does not fail) when pct set rejects the snippet reference.
attach_proxmenux_gpu_guard_to_lxc() {
    local ctid="$1"
    _gpu_guard_has_lxc_gpu "$ctid" || return 0
    local existing_hook
    existing_hook=$(pct config "$ctid" 2>/dev/null | awk '/^hookscript:/ {print $2}')
    # Already attached — nothing to do.
    if [[ "$existing_hook" == "$PROXMENUX_GPU_HOOK_STORAGE_REF" ]]; then
        return 0
    fi
    if pct set "$ctid" -hookscript "$PROXMENUX_GPU_HOOK_STORAGE_REF" >/dev/null 2>&1; then
        _gpu_guard_msg_ok "PCIe passthrough guard attached to LXC ${ctid}"
    else
        _gpu_guard_msg_warn "Could not attach PCIe passthrough guard to LXC ${ctid}. Ensure 'local' storage supports snippets."
    fi
}
# Rewrite the hookscript and (re)attach it to every VM/CT that uses GPU
# passthrough. Fixes vs. original:
#  - 'conf' is now declared local (it leaked into the caller's scope);
#  - explicit `return 0` so the function no longer reports failure when the
#    last scanned guest simply has no GPU (the trailing `&&` short-circuit
#    used to leak status 1, which is fatal for callers running `set -e`).
sync_proxmenux_gpu_guard_hooks() {
    ensure_proxmenux_gpu_guard_hookscript
    local conf vmid ctid
    for conf in /etc/pve/qemu-server/*.conf; do
        [[ -f "$conf" ]] || continue   # glob may match nothing
        vmid=$(basename "$conf" .conf)
        if _gpu_guard_has_vm_gpu "$vmid"; then
            attach_proxmenux_gpu_guard_to_vm "$vmid"
        fi
    done
    for conf in /etc/pve/lxc/*.conf; do
        [[ -f "$conf" ]] || continue
        ctid=$(basename "$conf" .conf)
        if _gpu_guard_has_lxc_gpu "$ctid"; then
            attach_proxmenux_gpu_guard_to_lxc "$ctid"
        fi
    done
    return 0
}
+52
View File
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Re-inclusion guard. Fix: ${VAR:-} avoids "unbound variable" when this file
# is sourced by a script running under `set -u`.
if [[ -n "${__PROXMENUX_PCI_PASSTHROUGH_HELPERS__:-}" ]]; then
return 0
fi
__PROXMENUX_PCI_PASSTHROUGH_HELPERS__=1
# True (0) when IOMMU passthrough looks usable: the kernel cmdline explicitly
# enables it AND /sys/kernel/iommu_groups contains at least one group.
function _pci_is_iommu_active() {
    if ! grep -qE 'intel_iommu=on|amd_iommu=on' /proc/cmdline 2>/dev/null; then
        return 1
    fi
    if [[ ! -d /sys/kernel/iommu_groups ]]; then
        return 1
    fi
    # Succeed only if at least one iommu group directory exists.
    find /sys/kernel/iommu_groups -mindepth 1 -maxdepth 1 -type d -print -quit 2>/dev/null | grep -q .
}
# Print the first free hostpciN index of VM <vmid> on stdout.
# Returns 1 (printing nothing) when the VM config cannot be read.
function _pci_next_hostpci_index() {
    local vmid="$1"
    local config_dump
    config_dump=$(qm config "$vmid" 2>/dev/null) || return 1
    local index=0
    while grep -q "^hostpci${index}:" <<< "$config_dump"; do
        index=$((index + 1))
    done
    echo "$index"
}
# _pci_slot_assigned_to_vm <0000:bus:dev.fn> <vmid>
# True (0) when any hostpci entry of VM <vmid> references the PCI *slot*
# (bus:dev — any function) of <pci_full>. The regex matches the slot with or
# without a leading "0000:" domain and with or without a ".function" suffix,
# terminated by a comma, whitespace or end of line to avoid prefix collisions.
function _pci_slot_assigned_to_vm() {
local pci_full="$1"
local vmid="$2"
local slot_base
# Strip the "0000:" domain and the ".function" suffix -> "bus:dev".
slot_base="${pci_full#0000:}"
slot_base="${slot_base%.*}"
qm config "$vmid" 2>/dev/null \
| grep -qE "^hostpci[0-9]+:.*(0000:)?${slot_base}(\\.[0-7])?([,[:space:]]|$)"
}
# _pci_function_assigned_to_vm <0000:bus:dev.fn> <vmid>
# True (0) when a hostpci entry of VM <vmid> references exactly this PCI
# *function*. Function .0 is special: Proxmox treats a bare slot reference
# ("bus:dev" with no suffix) as function 0, so for fn 0 the pattern accepts
# both the full BDF and the slot-only form; for any other function only the
# exact BDF matches.
function _pci_function_assigned_to_vm() {
local pci_full="$1"
local vmid="$2"
local bdf slot func pattern
# bdf  = "bus:dev.fn" (domain stripped); slot = "bus:dev"; func = "fn".
bdf="${pci_full#0000:}"
slot="${bdf%.*}"
func="${bdf##*.}"
if [[ "$func" == "0" ]]; then
pattern="^hostpci[0-9]+:.*(0000:)?(${bdf}|${slot})([,:[:space:]]|$)"
else
pattern="^hostpci[0-9]+:.*(0000:)?${bdf}([,[:space:]]|$)"
fi
qm config "$vmid" 2>/dev/null | grep -qE "$pattern"
}
-277
View File
@@ -1,277 +0,0 @@
#!/bin/bash
# ==========================================================
# Remove Subscription Banner - Proxmox VE (v3 - Minimal Intrusive)
# ==========================================================
# This version makes a surgical change to the checked_command function
# by changing the condition to 'if (false)' and commenting out the banner logic.
# Also patches the mobile UI to remove the subscription dialog.
# ==========================================================
set -euo pipefail
# Source utilities if available
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
TOOLS_JSON="/usr/local/share/proxmenux/installed_tools.json"
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
# NOTE(review): these two helpers come from utils.sh, yet they are invoked
# unconditionally — if utils.sh is missing the script dies here under
# `set -e` with "command not found". Confirm whether that hard dependency
# is intended, or guard with `declare -F load_language >/dev/null && ...`.
load_language
initialize_cache
# File paths
JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
MIN_JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js"
MOBILE_UI_FILE="/usr/share/pve-yew-mobile-gui/index.html.tpl"
BACKUP_DIR="$BASE_DIR/backups"
APT_HOOK="/etc/apt/apt.conf.d/no-nag-script"
PATCH_BIN="/usr/local/bin/pve-remove-nag-v3.sh"
# Markers used to detect an already-patched desktop/mobile UI.
MARK="/* PROXMENUX_NAG_PATCH_V3 */"
MOBILE_MARK="<!-- PROXMENUX_MOBILE_NAG_PATCH -->"
# Ensure tools JSON exists
# Create the installed-tools registry as an empty JSON object if it is absent.
ensure_tools_json() {
    if [ ! -f "$TOOLS_JSON" ]; then
        echo "{}" > "$TOOLS_JSON"
    fi
}
# Register tool in JSON
# Record a tool's state (JSON value, e.g. true/false) in the registry.
# Best-effort bookkeeping: silently skipped when jq is unavailable.
register_tool() {
    command -v jq >/dev/null 2>&1 || return 0
    local tool_name="$1"
    local tool_state="$2"
    ensure_tools_json
    # Write via a temp file so a jq failure never truncates the registry.
    jq --arg t "$tool_name" --argjson v "$tool_state" '.[$t]=$v' "$TOOLS_JSON" \
        > "$TOOLS_JSON.tmp" && mv "$TOOLS_JSON.tmp" "$TOOLS_JSON"
}
# Verify JS file integrity
# Sanity-check that a patched JS file is still plausible JavaScript:
# exists, non-empty, contains common JS tokens, and holds no NUL bytes
# (which would indicate binary corruption). Returns 0 when healthy.
verify_js_integrity() {
    local js_path="$1"
    [ -f "$js_path" ] || return 1
    [ -s "$js_path" ] || return 1
    grep -Eq 'Ext|function|var|const|let' "$js_path" || return 1
    # grep -P may be unsupported on some systems; in that case the check is
    # skipped (grep's error status does not count as "NUL found").
    if LC_ALL=C grep -qP '\x00' "$js_path" 2>/dev/null; then
        return 1
    fi
    return 0
}
# Create timestamped backup
# Back up a file into $BACKUP_DIR as "<name>.backup.<timestamp>", keeping a
# single backup per file (older ones for the same name are removed first).
# Echoes the backup path on success; echoes nothing if the source is missing.
create_backup() {
    local source_file="$1"
    local stamp
    stamp=$(date +%Y%m%d_%H%M%S)
    local dest="$BACKUP_DIR/$(basename "$source_file").backup.$stamp"
    mkdir -p "$BACKUP_DIR"
    if [ -f "$source_file" ]; then
        # One backup per file: purge previous timestamps before copying.
        rm -f "$BACKUP_DIR"/"$(basename "$source_file")".backup.* 2>/dev/null || true
        cp -a "$source_file" "$dest"
        echo "$dest"
    fi
}
# Create the patch script that will be called by APT hook
create_patch_script() {
cat > "$PATCH_BIN" <<'EOFPATCH'
#!/usr/bin/env bash
# ==========================================================
# Proxmox Subscription Banner Patch (v3 - Minimal)
# ==========================================================
set -euo pipefail
JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
MIN_JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js"
MOBILE_UI_FILE="/usr/share/pve-yew-mobile-gui/index.html.tpl"
BACKUP_DIR="/usr/local/share/proxmenux/backups"
MARK="/* PROXMENUX_NAG_PATCH_V3 */"
MOBILE_MARK="<!-- PROXMENUX_MOBILE_NAG_PATCH -->"
verify_js_integrity() {
local file="$1"
[ -f "$file" ] && [ -s "$file" ] && grep -Eq 'Ext|function' "$file" && ! LC_ALL=C grep -qP '\x00' "$file" 2>/dev/null
}
patch_checked_command() {
[ -f "$JS_FILE" ] || return 0
# Check if already patched
grep -q "$MARK" "$JS_FILE" && return 0
# Create backup
mkdir -p "$BACKUP_DIR"
local backup="$BACKUP_DIR/$(basename "$JS_FILE").backup.$(date +%Y%m%d_%H%M%S)"
cp -a "$JS_FILE" "$backup"
# Set trap to restore on error
trap "cp -a '$backup' '$JS_FILE' 2>/dev/null || true" ERR
# Add patch marker at the beginning
sed -i "1s|^|$MARK\n|" "$JS_FILE"
# Surgical patch: Change the condition in checked_command function
# This changes the if condition to 'if (false)' making the banner never show
if grep -q "res\.data\.status\.toLowerCase() !== 'active'" "$JS_FILE"; then
# Pattern for newer versions (8.4.5+)
sed -i "/checked_command: function/,/},$/s/res === null || res === undefined || !res || res\.data\.status\.toLowerCase() !== 'active'/false/g" "$JS_FILE"
elif grep -q "res\.data\.status !== 'Active'" "$JS_FILE"; then
# Pattern for older versions
sed -i "/checked_command: function/,/},$/s/res === null || res === undefined || !res || res\.data\.status !== 'Active'/false/g" "$JS_FILE"
fi
# Also handle the NoMoreNagging pattern if present
if grep -q "res\.data\.status\.toLowerCase() !== 'NoMoreNagging'" "$JS_FILE"; then
sed -i "/checked_command: function/,/},$/s/res === null || res === undefined || !res || res\.data\.status\.toLowerCase() !== 'NoMoreNagging'/false/g" "$JS_FILE"
fi
# Verify integrity after patch
if ! verify_js_integrity "$JS_FILE"; then
cp -a "$backup" "$JS_FILE"
return 1
fi
# Clean up generated files
rm -f "$MIN_JS_FILE" "$GZ_FILE" 2>/dev/null || true
find /var/cache/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/lib/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/cache/nginx/ -type f -delete 2>/dev/null || true
trap - ERR
return 0
}
patch_mobile_ui() {
# Patch the PVE mobile UI page so the "No valid subscription" dialog is
# removed client-side: inject a <script> with a MutationObserver before
# </head>. Returns 0 when the file is absent, already patched, or patched.
[ -f "$MOBILE_UI_FILE" ] || return 0
# Check if already patched
grep -q "$MOBILE_MARK" "$MOBILE_UI_FILE" && return 0
# Create backup
mkdir -p "$BACKUP_DIR"
local backup="$BACKUP_DIR/$(basename "$MOBILE_UI_FILE").backup.$(date +%Y%m%d_%H%M%S)"
cp -a "$MOBILE_UI_FILE" "$backup"
# Set trap to restore on error
trap "cp -a '$backup' '$MOBILE_UI_FILE' 2>/dev/null || true" ERR
# Insert the script before </head> tag
# NOTE: the trailing backslashes make the whole HTML/JS payload one sed
# "i" text argument; do not insert comments or blank lines inside it.
sed -i "/<\/head>/i\\
$MOBILE_MARK\\
<!-- Script to remove subscription banner from mobile UI -->\\
<script>\\
function removeNoSubDialog() {\\
const observer = new MutationObserver(() => {\\
const diag = document.querySelector('dialog[aria-label=\"No valid subscription\"]');\\
if (diag) {\\
diag.remove();\\
}\\
});\\
observer.observe(document.body, { childList: true, subtree: true });\\
}\\
window.addEventListener('load', () => {\\
setTimeout(removeNoSubDialog, 200);\\
});\\
</script>" "$MOBILE_UI_FILE"
trap - ERR
return 0
}
reload_services() {
# Reload (or restart) the web-facing services so the patched assets are
# served. All operations are best-effort: a stopped or missing service
# must never be treated as a failure.
systemctl is-active --quiet pveproxy 2>/dev/null && {
systemctl reload pveproxy 2>/dev/null || systemctl restart pveproxy 2>/dev/null || true
}
systemctl is-active --quiet nginx 2>/dev/null && {
systemctl reload nginx 2>/dev/null || true
}
systemctl is-active --quiet pvedaemon 2>/dev/null && {
systemctl reload pvedaemon 2>/dev/null || true
}
# Fix: always succeed. Previously the function's exit status was that of
# the last `systemctl is-active` check, so an inactive pvedaemon made
# reload_services (and thus main, under `set -e`) fail spuriously.
return 0
}
main() {
# Orchestrate the patch run: the desktop-UI patch is mandatory, the
# mobile-UI patch and the service reloads are best-effort.
if ! patch_checked_command; then
return 1
fi
patch_mobile_ui || true
reload_services
}
main
EOFPATCH
chmod 755 "$PATCH_BIN"
}
# Create APT hook to reapply patch after updates
create_apt_hook() {
# Install a DPkg Post-Invoke hook so the nag patch is reapplied
# automatically whenever a package upgrade rewrites the patched files.
cat > "$APT_HOOK" <<'EOFAPT'
/* ProxMenux: reapply minimal nag patch after upgrades */
DPkg::Post-Invoke { "/usr/local/bin/pve-remove-nag-v3.sh || true"; };
EOFAPT
chmod 644 "$APT_HOOK"
# Verify APT hook syntax
# Self-check: if apt cannot parse its config with the hook present,
# remove the hook rather than break every apt invocation.
apt-config dump >/dev/null 2>&1 || {
rm -f "$APT_HOOK"
}
}
# Main function to remove subscription banner
remove_subscription_banner_v3() {
    # Entry point: back up both UI files, install the patch script and the
    # APT reapply hook, run the patch once, and record the tool as applied.
    local pve_version backup_file mobile_backup
    pve_version=$(pveversion 2>/dev/null | grep -oP 'pve-manager/\K[0-9]+\.[0-9]+' | head -1 || echo "unknown")
    msg_info "$(translate "Detected Proxmox VE") ${pve_version} - $(translate "applying banner patch")"

    # Drop APT hooks left behind by earlier nag-removal variants.
    for f in /etc/apt/apt.conf.d/*nag*; do
        [[ -e "$f" ]] && rm -f "$f"
    done

    # Take timestamped backups up front (create_backup handles rotation).
    backup_file=$(create_backup "$JS_FILE")
    if [ -f "$MOBILE_UI_FILE" ]; then
        mobile_backup=$(create_backup "$MOBILE_UI_FILE")
    fi

    # Write the standalone patch script and the APT hook that re-runs it.
    create_patch_script
    create_apt_hook

    # Apply the patch now; on failure the backups remain on disk.
    if ! "$PATCH_BIN"; then
        msg_error "$(translate "Error applying patch. Backups preserved at"): $BACKUP_DIR"
        return 1
    fi

    # Mark the tool as installed in the ProxMenux registry.
    register_tool "subscription_banner" true
    msg_ok "$(translate "Subscription banner removed successfully")"
    msg_ok "$(translate "Desktop and Mobile UI patched")"
    msg_ok "$(translate "Refresh your browser (Ctrl+Shift+R) to see changes")"
}
# Run if executed directly
# (sourcing the file only defines the functions without side effects)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
remove_subscription_banner_v3
fi
-124
View File
@@ -1,124 +0,0 @@
#!/bin/bash
# ==========================================================
# Remove Subscription Banner - Proxmox VE 9.x
# ==========================================================
# Paths used by the ProxMenux tooling: local scripts, shared utils,
# the translation virtualenv, and the installed-tools JSON registry.
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
VENV_PATH="/opt/googletrans-env"
TOOLS_JSON="/usr/local/share/proxmenux/installed_tools.json"
# Pull in shared helpers (msg_*, translate, load_language, ...) when present.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
load_language
initialize_cache
ensure_tools_json() {
    # Seed the installed-tools registry with an empty JSON object
    # the first time it is needed.
    if [ ! -f "$TOOLS_JSON" ]; then
        echo "{}" > "$TOOLS_JSON"
    fi
}
register_tool() {
# Record a tool's state in the installed-tools registry.
#   $1 = tool name, $2 = JSON value (typically true/false) to store.
# Writes via a temp file so the registry is swapped in atomically; needs jq.
local tool="$1"
local state="$2"
ensure_tools_json
jq --arg t "$tool" --argjson v "$state" '.[$t]=$v' "$TOOLS_JSON" > "$TOOLS_JSON.tmp" && mv "$TOOLS_JSON.tmp" "$TOOLS_JSON"
}
remove_subscription_banner_pve9() {
# Remove the "No valid subscription" nag from the PVE 9.x web UI by
# rewriting proxmoxlib.js in place, and install an APT hook that reapplies
# the same sed fixups after package upgrades. Returns 1 on version
# mismatch or when the JS file is missing.
local JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
local MIN_JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js"
local GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
local APT_HOOK="/etc/apt/apt.conf.d/no-nag-script"
local pve_version=$(pveversion 2>/dev/null | grep -oP 'pve-manager/\K[0-9]+\.[0-9]+' | head -1)
local pve_major=$(echo "$pve_version" | cut -d. -f1)
# Gate on major version; the 2>/dev/null silences the arithmetic-test error
# when the version could not be detected (then the check falls through and
# the script continues). NOTE(review): verify that continuing on an
# undetected version is intended.
if [ "$pve_major" -lt 9 ] 2>/dev/null; then
msg_error "This script is for PVE 9.x only. Detected PVE $pve_version"
return 1
fi
msg_info "Detected Proxmox VE $pve_version - Applying PVE 9.x patches"
if [ ! -f "$JS_FILE" ]; then
msg_error "JavaScript file not found: $JS_FILE"
return 1
fi
# Timestamped one-off backup of the original file (no rotation here).
local backup_file="${JS_FILE}.backup.pve9.$(date +%Y%m%d_%H%M%S)"
cp "$JS_FILE" "$backup_file"
# Remove APT hooks left behind by earlier nag-removal variants.
for f in /etc/apt/apt.conf.d/*nag*; do
[[ -e "$f" ]] && rm -f "$f"
done
# Drop generated/minified copies and caches so the patched JS is served.
[[ -f "$GZ_FILE" ]] && rm -f "$GZ_FILE"
[[ -f "$MIN_JS_FILE" ]] && rm -f "$MIN_JS_FILE"
find /var/cache/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/lib/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/cache/nginx/ -type f -delete 2>/dev/null || true
# Core patches: defuse the subscription check and soften the dialog text.
sed -i "s/res\.data\.status\.toLowerCase() !== 'active'/false/g" "$JS_FILE"
sed -i "s/subscriptionActive: ''/subscriptionActive: true/g" "$JS_FILE"
sed -i "s/title: gettext('No valid subscription')/title: gettext('Community Edition')/g" "$JS_FILE"
sed -i "s/You do not have a valid subscription for this server/Community Edition - No subscription required/g" "$JS_FILE"
sed -i "s/Enterprise repository needs valid subscription/Enterprise repository configured/g" "$JS_FILE"
sed -i "s/icon: Ext\.Msg\.WARNING/icon: Ext.Msg.INFO/g" "$JS_FILE"
sed -i "s/subscription = !(/subscription = false \&\& (/g" "$JS_FILE"
# Retry the main substitution once if the pattern is still present.
if grep -q "res\.data\.status\.toLowerCase() !== 'active'" "$JS_FILE"; then
msg_warn "Some patches may not have applied correctly, retrying..."
sed -i "s/res\.data\.status\.toLowerCase() !== 'active'/false/g" "$JS_FILE"
fi
# APT hook: reapply the same sed fixups after every dpkg run. The quoting
# below is deliberate and fragile ('\'' closes/reopens single quotes for
# apt's own parser) - do not reformat.
[[ -f "$APT_HOOK" ]] && rm -f "$APT_HOOK"
cat > "$APT_HOOK" << 'EOF'
DPkg::Post-Invoke {
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/res\\.data\\.status\\.toLowerCase() !== '\''active'\''/false/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/subscriptionActive: '\'\'\''/subscriptionActive: true/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/title: gettext('\''No valid subscription'\'')/title: gettext('\''Community Edition'\'')/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/subscription = !(/subscription = false \\&\\& (/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"rm -f /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz || true";
};
EOF
chmod 644 "$APT_HOOK"
# If apt cannot parse its config with the hook installed, remove the hook.
if ! apt-config dump >/dev/null 2>&1; then
msg_warn "APT hook has syntax issues, removing..."
rm -f "$APT_HOOK"
else
msg_ok "APT hook created successfully"
fi
systemctl reload nginx 2>/dev/null || true
msg_ok "Subscription banner removed successfully for Proxmox VE $pve_version"
msg_ok "Banner removal process completed - refresh your browser to see changes"
register_tool "subscription_banner" true
}
# Execute only when run directly (sourcing just defines the functions).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
remove_subscription_banner_pve9
fi
-119
View File
@@ -1,119 +0,0 @@
#!/bin/bash
# ==========================================================
# Remove Subscription Banner - Proxmox VE 9.x ONLY
# ==========================================================
# ProxMenux paths: local scripts, shared utils, translation virtualenv,
# and the installed-tools JSON registry.
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
VENV_PATH="/opt/googletrans-env"
TOOLS_JSON="/usr/local/share/proxmenux/installed_tools.json"
# Load shared helpers (msg_*, translate, load_language, ...) when available.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
load_language
initialize_cache
# Tool registration system
ensure_tools_json() {
# Create the registry file with an empty JSON object if it does not exist.
[ -f "$TOOLS_JSON" ] || echo "{}" > "$TOOLS_JSON"
}
register_tool() {
    # Persist a tool's state (a JSON value) under its name in the
    # installed-tools registry, writing via a temp file so the JSON
    # document is swapped in atomically.
    local tool="$1" state="$2" tmp="$TOOLS_JSON.tmp"
    ensure_tools_json
    jq --arg t "$tool" --argjson v "$state" '.[$t]=$v' "$TOOLS_JSON" > "$tmp" && mv "$tmp" "$TOOLS_JSON"
}
remove_subscription_banner_pve9() {
# Patch the PVE 9.x web UI to drop the subscription nag: sed-rewrite
# proxmoxlib.js, clear generated copies/caches, and install an APT hook
# that reapplies the fixups after upgrades. Returns 1 on version mismatch
# or missing JS file.
local JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
local MIN_JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js"
local GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
local APT_HOOK="/etc/apt/apt.conf.d/no-nag-script"
# Verify PVE 9.x
# (2>/dev/null hides the test error when the version is undetectable;
# the script then proceeds. NOTE(review): confirm that is intended.)
local pve_version=$(pveversion 2>/dev/null | grep -oP 'pve-manager/\K[0-9]+\.[0-9]+' | head -1)
local pve_major=$(echo "$pve_version" | cut -d. -f1)
if [ "$pve_major" -lt 9 ] 2>/dev/null; then
msg_error "This script is for PVE 9.x only. Detected PVE $pve_version"
return 1
fi
msg_info "Detected Proxmox VE $pve_version - Applying PVE 9.x patches"
# Verify that the file exists
if [ ! -f "$JS_FILE" ]; then
msg_error "JavaScript file not found: $JS_FILE"
return 1
fi
# Create backup of original file
local backup_file="${JS_FILE}.backup.pve9.$(date +%Y%m%d_%H%M%S)"
cp "$JS_FILE" "$backup_file"
# Clean any existing problematic APT hooks
for f in /etc/apt/apt.conf.d/*nag*; do
[[ -e "$f" ]] && rm -f "$f"
done
# Main subscription check patches for PVE 9
sed -i "s/res\.data\.status\.toLowerCase() !== 'active'/false/g" "$JS_FILE"
sed -i "s/subscriptionActive: ''/subscriptionActive: true/g" "$JS_FILE"
sed -i "s/title: gettext('No valid subscription')/title: gettext('Community Edition')/g" "$JS_FILE"
# Additional UX improvements for PVE 9
sed -i "s/You do not have a valid subscription for this server/Community Edition - No subscription required/g" "$JS_FILE"
sed -i "s/Enterprise repository needs valid subscription/Enterprise repository configured/g" "$JS_FILE"
sed -i "s/icon: Ext\.Msg\.WARNING/icon: Ext.Msg.INFO/g" "$JS_FILE"
# Additional subscription patterns that may exist in PVE 9
sed -i "s/subscription = !(/subscription = false \&\& (/g" "$JS_FILE"
# Remove compressed/minified files to force regeneration
[[ -f "$GZ_FILE" ]] && rm -f "$GZ_FILE"
[[ -f "$MIN_JS_FILE" ]] && rm -f "$MIN_JS_FILE"
# Clear various caches
find /var/cache/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/lib/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
# Create PVE 9.x specific APT hook
# (the '\'' quoting below is required by apt's config parser - keep as-is)
[[ -f "$APT_HOOK" ]] && rm -f "$APT_HOOK"
cat > "$APT_HOOK" << 'EOF'
DPkg::Post-Invoke {
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/res\\.data\\.status\\.toLowerCase() !== '\''active'\''/false/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/subscriptionActive: '\'\'\''/subscriptionActive: true/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/title: gettext('\''No valid subscription'\'')/title: gettext('\''Community Edition'\'')/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"test -e /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && sed -i 's/subscription = !(/subscription = false \\&\\& (/g' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js || true";
"rm -f /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz || true";
};
EOF
chmod 644 "$APT_HOOK"
# Verify APT hook syntax
if ! apt-config dump >/dev/null 2>&1; then
msg_warn "APT hook has syntax issues, removing..."
rm -f "$APT_HOOK"
else
msg_ok "APT hook created successfully"
fi
msg_ok "Subscription banner removed successfully for Proxmox VE $pve_version"
msg_ok "Banner removal process completed"
register_tool "subscription_banner" true
}
# Execute function if called directly
# (sourcing the file only defines the functions)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
remove_subscription_banner_pve9
fi
-257
View File
@@ -1,257 +0,0 @@
#!/bin/bash
# ==========================================================
# Remove Subscription Banner - Proxmox VE 9.x (Clean Version)
# ==========================================================
# Fail fast: abort on errors, unset variables and pipeline failures.
set -euo pipefail
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
TOOLS_JSON="/usr/local/share/proxmenux/installed_tools.json"
# Load shared helpers (msg_*, translate, load_language, ...) when available.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
load_language
initialize_cache
ensure_tools_json() {
# Create the installed-tools registry as an empty JSON object if missing.
[ -f "$TOOLS_JSON" ] || echo "{}" > "$TOOLS_JSON"
}
register_tool() {
    # Record a tool's state in the registry. Registration is best-effort:
    # when jq is not installed this is a silent no-op.
    if ! command -v jq >/dev/null 2>&1; then
        return 0
    fi
    local tool="$1"
    local state="$2"
    ensure_tools_json
    # Write through a temp file so the JSON document is replaced atomically.
    jq --arg t "$tool" --argjson v "$state" '.[$t]=$v' "$TOOLS_JSON" > "$TOOLS_JSON.tmp" \
        && mv "$TOOLS_JSON.tmp" "$TOOLS_JSON"
}
# Files touched by the patch, the generated patch script, the APT hook,
# and the markers used to detect an already-patched file.
JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
MIN_JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js"
GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
MOBILE_TPL="/usr/share/pve-yew-mobile-gui/index.html.tpl"
APT_HOOK="/etc/apt/apt.conf.d/no-nag-script"
PATCH_BIN="/usr/local/bin/pve-remove-nag.sh"
MARK_JS="PROXMENUX_NAG_REMOVED_v2"
MARK_MOBILE="<!-- PROXMENUX: MOBILE NAG PATCH v2 -->"
verify_js_integrity() {
    # Sanity-check a (patched) JS file: it must exist, be non-empty, still
    # contain recognisable JavaScript tokens, and contain no NUL bytes
    # (a NUL would indicate binary corruption). Returns 0 when healthy.
    local target="$1"
    if [ ! -f "$target" ] || [ ! -s "$target" ]; then
        return 1
    fi
    grep -Eq 'Ext|function|var|const|let' "$target" || return 1
    # Succeeds when no NUL byte is found (grep -P may be unsupported;
    # its stderr is discarded, matching the original behavior).
    ! LC_ALL=C grep -qP '\x00' "$target" 2>/dev/null
}
create_backup() {
# Copy $1 into $BASE_DIR/backups with a timestamp suffix and echo the
# backup path. Keeps only the 5 most recent backups per source file.
# Echoes nothing (and copies nothing) when the source file is missing.
local file="$1"
local backup_dir="$BASE_DIR/backups"
local timestamp
timestamp=$(date +%Y%m%d_%H%M%S)
local backup_file="$backup_dir/$(basename "$file").backup.$timestamp"
mkdir -p "$backup_dir"
if [ -f "$file" ]; then
cp -a "$file" "$backup_file"
# Rotate: newest first, drop everything past the 5th (best-effort).
ls -t "$backup_dir"/"$(basename "$file")".backup.* 2>/dev/null | tail -n +6 | xargs -r rm -f 2>/dev/null || true
echo "$backup_file"
fi
}
# ----------------------------------------------------
create_patch_script() {
# Write the standalone nag-removal script to $PATCH_BIN. The heredoc is
# quoted ('EOF') so nothing is expanded here; the embedded script resolves
# its own paths at run time. It is executed once now and re-run by the
# APT hook after upgrades.
cat > "$PATCH_BIN" <<'EOF'
#!/usr/bin/env bash
set -euo pipefail
JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js"
MIN_JS_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.min.js"
GZ_FILE="/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js.gz"
MOBILE_TPL="/usr/share/pve-yew-mobile-gui/index.html.tpl"
MARK_JS="PROXMENUX_NAG_REMOVED_v2"
MARK_MOBILE="<!-- PROXMENUX: MOBILE NAG PATCH v2 -->"
BASE_DIR="/usr/local/share/proxmenux"
verify_js_integrity() {
local file="$1"
[ -f "$file" ] && [ -s "$file" ] && grep -Eq 'Ext|function' "$file" && ! LC_ALL=C grep -qP '\x00' "$file" 2>/dev/null
}
patch_web() {
[ -f "$JS_FILE" ] || return 0
grep -q "$MARK_JS" "$JS_FILE" && return 0
local backup_dir="$BASE_DIR/backups"
mkdir -p "$backup_dir"
local backup="$backup_dir/$(basename "$JS_FILE").backup.$(date +%Y%m%d_%H%M%S)"
cp -a "$JS_FILE" "$backup"
trap "cp -a '$backup' '$JS_FILE' 2>/dev/null || true" ERR
sed -i '1s|^|/* '"$MARK_JS"' */\n|' "$JS_FILE"
local patterns_found=0
if grep -q "res\.data\.status\.toLowerCase() !== 'active'" "$JS_FILE"; then
sed -i "s/res\.data\.status\.toLowerCase() !== 'active'/false/g" "$JS_FILE"
patterns_found=$((patterns_found + 1))
fi
if grep -q "subscriptionActive: ''" "$JS_FILE"; then
sed -i "s/subscriptionActive: ''/subscriptionActive: true/g" "$JS_FILE"
patterns_found=$((patterns_found + 1))
fi
if grep -q "title: gettext('No valid subscription')" "$JS_FILE"; then
sed -i "s/title: gettext('No valid subscription')/title: gettext('Community Edition')/g" "$JS_FILE"
patterns_found=$((patterns_found + 1))
fi
if grep -q "icon: Ext\.Msg\.WARNING" "$JS_FILE"; then
sed -i "s/icon: Ext\.Msg\.WARNING/icon: Ext.Msg.INFO/g" "$JS_FILE"
patterns_found=$((patterns_found + 1))
fi
if grep -q "subscription = !(" "$JS_FILE"; then
sed -i "s/subscription = !(/subscription = false \&\& (/g" "$JS_FILE"
patterns_found=$((patterns_found + 1))
fi
# If nothing matched (upstream change), restore the backup and exit cleanly
if [ "${patterns_found:-0}" -eq 0 ]; then
cp -a "$backup" "$JS_FILE"
return 0
fi
# Final integrity verification
if ! verify_js_integrity "$JS_FILE"; then
cp -a "$backup" "$JS_FILE"
return 1
fi
# Clean up generated artifacts/caches
rm -f "$MIN_JS_FILE" "$GZ_FILE" 2>/dev/null || true
find /var/cache/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/lib/pve-manager/ -name "*.js*" -delete 2>/dev/null || true
find /var/cache/nginx/ -type f -delete 2>/dev/null || true
trap - ERR
}
patch_mobile() {
[ -f "$MOBILE_TPL" ] || return 0
grep -q "$MARK_MOBILE" "$MOBILE_TPL" && return 0
local backup_dir="$BASE_DIR/backups"
mkdir -p "$backup_dir"
cp -a "$MOBILE_TPL" "$backup_dir/$(basename "$MOBILE_TPL").backup.$(date +%Y%m%d_%H%M%S)"
cat >> "$MOBILE_TPL" <<EOM
$MARK_MOBILE
<script>
(function() {
'use strict';
function removeSubscriptionElements() {
try {
const dialogs = document.querySelectorAll('dialog.pwt-outer-dialog');
dialogs.forEach(d => {
const text = (d.textContent || '').toLowerCase();
if (text.includes('subscription') || text.includes('no valid')) { d.remove(); }
});
const cards = document.querySelectorAll('.pwt-card.pwt-p-2.pwt-d-flex.pwt-interactive.pwt-justify-content-center');
cards.forEach(c => {
const text = (c.textContent || '').toLowerCase();
const hasButton = c.querySelector('button');
if (!hasButton && (text.includes('subscription') || text.includes('no valid'))) { c.remove(); }
});
const alerts = document.querySelectorAll('[class*="alert"], [class*="warning"], [class*="notice"]');
alerts.forEach(a => {
const text = (a.textContent || '').toLowerCase();
if (text.includes('subscription') || text.includes('no valid')) { a.remove(); }
});
} catch (e) { console.warn('Error removing subscription elements:', e); }
}
if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', removeSubscriptionElements); }
else { removeSubscriptionElements(); }
const observer = new MutationObserver(removeSubscriptionElements);
if (document.body) {
observer.observe(document.body, { childList: true, subtree: true });
const interval = setInterval(removeSubscriptionElements, 500);
setTimeout(() => { try { observer.disconnect(); clearInterval(interval); } catch(e){} }, 30000);
}
})();
</script>
EOM
}
reload_services() {
systemctl is-active --quiet pveproxy 2>/dev/null && {
systemctl reload pveproxy 2>/dev/null || systemctl restart pveproxy 2>/dev/null || true
}
systemctl is-active --quiet nginx 2>/dev/null && {
systemctl reload nginx 2>/dev/null || true
}
systemctl is-active --quiet pvedaemon 2>/dev/null && {
systemctl reload pvedaemon 2>/dev/null || true
}
find /var/cache/pve-manager/ -type f -delete 2>/dev/null || true
find /var/lib/pve-manager/ -type f -delete 2>/dev/null || true
}
main() {
patch_web || return 1
patch_mobile
reload_services
}
main
EOF
chmod 755 "$PATCH_BIN"
}
# ----------------------------------------------------
create_apt_hook() {
# Install a DPkg Post-Invoke hook so the nag patch is reapplied
# automatically after package upgrades rewrite the patched files.
cat > "$APT_HOOK" <<'EOF'
/* ProxMenux: reapply nag patch after upgrades */
DPkg::Post-Invoke { "/usr/local/bin/pve-remove-nag.sh || true"; };
EOF
chmod 644 "$APT_HOOK"
# Self-check: if apt cannot parse its config with the hook present, drop it.
apt-config dump >/dev/null 2>&1 || { msg_warn "APT hook syntax issue"; rm -f "$APT_HOOK"; }
}
remove_subscription_banner_pve9() {
    # Entry point: install the patch script and the APT reapply hook, then
    # run the patch once and register the tool. Returns 1 when the patch
    # script fails.
    local pve_version
    pve_version=$(pveversion 2>/dev/null | grep -oP 'pve-manager/\K[0-9]+\.[0-9]+' | head -1 || true)
    # (Removed unused local `pve_major` - it was derived but never read.)
    msg_info "$(translate "Detected Proxmox VE ${pve_version:-9.x} removing subscription banner")"
    create_patch_script
    create_apt_hook
    if ! "$PATCH_BIN"; then
        msg_error "$(translate "Error applying patches")"
        return 1
    fi
    register_tool "subscription_banner" true
    msg_ok "$(translate "Subscription banner removed successfully.")"
    msg_ok "$(translate "Refresh your browser to see changes.")"
}
# Execute only when run directly (sourcing just defines the functions).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
remove_subscription_banner_pve9
fi
+111 -13
View File
@@ -1,11 +1,98 @@
#!/usr/bin/env bash
# ==========================================================
# ProxMenux - Global Share Functions (reusable)
# File: scripts/global/share_common.func
# ==========================================================
#!/bin/bash
# ProxMenux - Shared Common Functions
# ============================================
# Author : MacRimi
# License : MIT
# Version : 1.0
# Last Updated: 29/01/2026
# ============================================
# Common functions shared across multiple scripts
# ==========================================================
# Ensure repositories are properly configured
# ==========================================================
ensure_repositories() {
    # Make sure this node has working Debian + Proxmox no-subscription
    # repositories, creating any missing source files, then refresh the APT
    # lists if anything changed (or the lists are empty).
    # Returns 1 when the Proxmox version cannot be detected.
    local pve_version need_update=false
    pve_version=$(pveversion 2>/dev/null | grep -oP 'pve-manager/\K[0-9]+' | head -1)
    if [[ -z "$pve_version" ]]; then
        msg_error "$(translate 'Unable to detect Proxmox version.')"
        return 1
    fi
    if (( pve_version >= 9 )); then
        # ===== PVE 9 (Debian 13 - trixie) =====
        # proxmox.sources (no-subscription, deb822 format) - create if missing
        if [[ ! -f /etc/apt/sources.list.d/proxmox.sources ]]; then
            cat > /etc/apt/sources.list.d/proxmox.sources <<'EOF'
Enabled: true
Types: deb
URIs: http://download.proxmox.com/debian/pve
Suites: trixie
Components: pve-no-subscription
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
EOF
            need_update=true
        fi
        # debian.sources - create if missing. NOTE: deb822 stanzas must be
        # separated by a blank line, hence the empty line between them.
        if [[ ! -f /etc/apt/sources.list.d/debian.sources ]]; then
            cat > /etc/apt/sources.list.d/debian.sources <<'EOF'
Types: deb
URIs: http://deb.debian.org/debian/
Suites: trixie trixie-updates
Components: main contrib non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg

Types: deb
URIs: http://security.debian.org/debian-security/
Suites: trixie-security
Components: main contrib non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
EOF
            need_update=true
        fi
    else
        # ===== PVE 8 (Debian 12 - bookworm) =====
        local sources_file="/etc/apt/sources.list"
        # Debian base (append minimal lines if missing).
        # Fix: the previous pattern 'deb .* bookworm .* main' required extra
        # text between "bookworm" and "main" and so never matched the very
        # lines appended below; every call duplicated the entries. The
        # pattern is now anchored to '^deb' (so commented-out entries do not
        # count as present) and tolerates a single space before "main".
        if ! grep -qE '^deb .* bookworm .*main' "$sources_file" 2>/dev/null; then
            {
                echo "deb http://deb.debian.org/debian bookworm main contrib non-free non-free-firmware"
                echo "deb http://deb.debian.org/debian bookworm-updates main contrib non-free non-free-firmware"
                echo "deb http://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware"
            } >> "$sources_file"
            need_update=true
        fi
        # Proxmox no-subscription list (classic one-line format) if missing
        if [[ ! -f /etc/apt/sources.list.d/pve-no-subscription.list ]]; then
            echo "deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription" \
                > /etc/apt/sources.list.d/pve-no-subscription.list
            need_update=true
        fi
    fi
    # apt-get update only if something changed or the lists are empty.
    # The second attempt runs un-silenced so errors are visible to the user.
    if [[ "$need_update" == true ]] || [[ ! -d /var/lib/apt/lists || -z "$(ls -A /var/lib/apt/lists 2>/dev/null)" ]]; then
        msg_info "$(translate 'Updating APT package lists...')"
        apt-get update >/dev/null 2>&1 || apt-get update
        msg_ok "$(translate 'APT package lists updated')"
    fi
    return 0
}
# ==========================================================
# Source guard: bail out if this library was already loaded.
# Fix: use a ':-' default so the expansion is safe under `set -u`
# (the variable is unset on first load, which previously aborted
# nounset-enabled callers); behavior is otherwise unchanged.
if [[ -n "${__PROXMENUX_SHARE_COMMON__:-}" ]]; then
return 0
fi
@@ -41,7 +128,7 @@ pmx_share_map_set() {
# ==========================================================
@@ -120,7 +207,7 @@ pmx_choose_or_create_group() {
# ==========================================================
@@ -179,7 +266,7 @@ pmx_ensure_host_group() {
# ==========================================================
@@ -214,7 +301,7 @@ pmx_prepare_host_shared_dir() {
# ==========================================================
@@ -257,7 +344,7 @@ pmx_select_host_mount_point() {
# ==========================================================
@@ -311,7 +398,7 @@ select_host_directory_() {
# ==========================================================
@@ -366,7 +453,7 @@ select_host_directory__() {
# ==========================================================
@@ -421,6 +508,9 @@ select_host_directory() {
# ==========================================================
select_lxc_container() {
@@ -455,6 +545,14 @@ select_lxc_container() {
# ==========================================================
select_container_mount_point_() {
local ctid="$1"
local host_dir="$2"
@@ -500,7 +598,7 @@ select_container_mount_point_() {
# ==========================================================
-339
View File
@@ -1,339 +0,0 @@
#!/bin/bash
# ==========================================================
# Proxmox VE Update Script
# ==========================================================
# Configuration
# ProxMenux paths: local scripts, shared utils, translation virtualenv,
# and the installed-tools JSON registry.
LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$BASE_DIR/utils.sh"
VENV_PATH="/opt/googletrans-env"
TOOLS_JSON="/usr/local/share/proxmenux/installed_tools.json"
# Load shared helpers (msg_*, translate, load_language, ...) when available.
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
load_language
initialize_cache
ensure_tools_json() {
# Create the installed-tools registry as an empty JSON object if missing.
[ -f "$TOOLS_JSON" ] || echo "{}" > "$TOOLS_JSON"
}
register_tool() {
# Record tool state in the registry ($1 = name, $2 = JSON value).
# Writes via a temp file so the JSON is swapped in atomically; needs jq.
local tool="$1"
local state="$2"
ensure_tools_json
jq --arg t "$tool" --argjson v "$state" '.[$t]=$v' "$TOOLS_JSON" > "$TOOLS_JSON.tmp" && mv "$TOOLS_JSON.tmp" "$TOOLS_JSON"
}
download_common_functions() {
    # Load the shared helper functions from the local scripts directory,
    # propagating failure to the caller. (Despite the name, nothing is
    # downloaded - the file is sourced from disk.)
    source "$LOCAL_SCRIPTS/global/common-functions.sh" || return 1
}
update_pve9() {
# Interactive upgrade of this node to the latest Proxmox VE 9.x packages:
# checks disk/network preconditions, rewrites APT sources for trixie,
# shows a confirmation menu, dist-upgrades with a progress display, then
# installs essential packages and cleans up.
local pve_version=$(pveversion | awk -F'/' '{print $2}' | cut -d'-' -f1)
local start_time=$(date +%s)
local log_file="/var/log/proxmox-update-$(date +%Y%m%d-%H%M%S).log"
local changes_made=false
local OS_CODENAME="$(grep "VERSION_CODENAME=" /etc/os-release | cut -d"=" -f 2 | xargs)"
local TARGET_CODENAME="trixie"
if [ -z "$OS_CODENAME" ]; then
OS_CODENAME=$(lsb_release -cs 2>/dev/null || echo "trixie")
fi
download_common_functions
msg_info2 "$(translate "Detected: Proxmox VE $pve_version (Current: $OS_CODENAME, Target: $TARGET_CODENAME)")"
echo -e
# Precondition: at least ~1 GiB free in the APT archive cache.
local available_space=$(df /var/cache/apt/archives | awk 'NR==2 {print int($4/1024)}')
if [ "$available_space" -lt 1024 ]; then
msg_error "$(translate "Insufficient disk space. Available: ${available_space}MB")"
echo -e
msg_success "$(translate "Press Enter to return to menu...")"
read -r
return 1
fi
# Precondition: the Proxmox download server must be reachable.
if ! ping -c 1 download.proxmox.com >/dev/null 2>&1; then
msg_error "$(translate "Cannot reach Proxmox repositories")"
echo -e
msg_success "$(translate "Press Enter to return to menu...")"
read -r
return 1
fi
# Helper: disable a deb822 .sources repo file in place (Enabled: false),
# removing it entirely when it is malformed. Returns 0 when the file
# existed (i.e. something was disabled), 1 otherwise.
disable_sources_repo() {
local file="$1"
if [[ -f "$file" ]]; then
# Squeeze trailing blank lines before editing.
sed -i ':a;/^\n*$/{$d;N;ba}' "$file"
if grep -q "^Enabled:" "$file"; then
sed -i 's/^Enabled:.*$/Enabled: false/' "$file"
else
echo "Enabled: false" >> "$file"
fi
if ! grep -q "^Types: " "$file"; then
msg_warn "$(translate "Malformed .sources file detected, removing: $(basename "$file")")"
rm -f "$file"
fi
return 0
fi
return 1
}
# Disable enterprise repos (they require a subscription).
if disable_sources_repo "/etc/apt/sources.list.d/pve-enterprise.sources"; then
msg_ok "$(translate "Enterprise Proxmox repository disabled")"
changes_made=true
fi
if disable_sources_repo "/etc/apt/sources.list.d/ceph.sources"; then
msg_ok "$(translate "Enterprise Proxmox Ceph repository disabled")"
changes_made=true
fi
# Remove legacy one-line .list repo files superseded by deb822 sources.
for legacy_file in /etc/apt/sources.list.d/pve-public-repo.list \
/etc/apt/sources.list.d/pve-install-repo.list \
/etc/apt/sources.list.d/debian.list; do
if [[ -f "$legacy_file" ]]; then
rm -f "$legacy_file"
msg_ok "$(translate "Removed legacy repository: $(basename "$legacy_file")")"
fi
done
if [[ -f /etc/apt/sources.list.d/debian.sources ]]; then
rm -f /etc/apt/sources.list.d/debian.sources
msg_ok "$(translate "Old debian.sources file removed to prevent duplication")"
fi
# Recreate the Proxmox and Debian deb822 sources for the target release.
msg_info "$(translate "Creating Proxmox VE 9.x no-subscription repository...")"
cat > /etc/apt/sources.list.d/proxmox.sources << EOF
Enabled: true
Types: deb
URIs: http://download.proxmox.com/debian/pve
Suites: ${TARGET_CODENAME}
Components: pve-no-subscription
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
EOF
msg_ok "$(translate "Proxmox VE 9.x no-subscription repository created")"
changes_made=true
msg_info "$(translate "Creating Debian ${TARGET_CODENAME} sources file...")"
cat > /etc/apt/sources.list.d/debian.sources << EOF
Types: deb
URIs: http://deb.debian.org/debian/
Suites: ${TARGET_CODENAME} ${TARGET_CODENAME}-updates
Components: main contrib non-free non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb
URIs: http://security.debian.org/debian-security/
Suites: ${TARGET_CODENAME}-security
Components: main contrib non-free non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
EOF
msg_ok "$(translate "Debian repositories configured for $TARGET_CODENAME")"
# Silence the "non-free-firmware" source-list warning once.
local firmware_conf="/etc/apt/apt.conf.d/no-firmware-warnings.conf"
if [ ! -f "$firmware_conf" ]; then
msg_info "$(translate "Disabling non-free firmware warnings...")"
echo 'APT::Get::Update::SourceListWarnings::NonFreeFirmware "false";' > "$firmware_conf"
msg_ok "$(translate "Non-free firmware warnings disabled")"
fi
# Refresh package lists, with targeted recovery for GPG/404 failures.
update_output=$(apt-get update 2>&1)
update_exit_code=$?
if [ $update_exit_code -eq 0 ]; then
msg_ok "$(translate "Package lists updated successfully")"
else
if echo "$update_output" | grep -q "NO_PUBKEY\|GPG error"; then
msg_info "$(translate "Fixing GPG key issues...")"
# NOTE(review): apt-key is deprecated on current Debian; consider
# fetching keys into /etc/apt/trusted.gpg.d instead - confirm.
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys $(echo "$update_output" | grep "NO_PUBKEY" | sed 's/.*NO_PUBKEY //' | head -1) 2>/dev/null
if apt-get update > "$log_file" 2>&1; then
msg_ok "$(translate "Package lists updated after GPG fix")"
else
msg_error "$(translate "Failed to update package lists. Check log: $log_file")"
return 1
fi
elif echo "$update_output" | grep -q "404\|Failed to fetch"; then
msg_warn "$(translate "Some repositories are not available, continuing with available ones...")"
else
msg_error "$(translate "Failed to update package lists. Check log: $log_file")"
echo "Error details: $update_output"
return 1
fi
fi
if apt policy 2>/dev/null | grep -q "${TARGET_CODENAME}.*pve-no-subscription"; then
msg_ok "$(translate "Proxmox VE 9.x repositories verified")"
else
msg_warn "$(translate "Proxmox VE 9.x repositories verification inconclusive, continuing...")"
fi
# Gather version/update counts for the confirmation menu.
local current_pve_version=$(pveversion 2>/dev/null | grep -oP 'pve-manager/\K[0-9]+\.[0-9]+\.[0-9]+' | head -1)
local available_pve_version=$(apt-cache policy pve-manager 2>/dev/null | grep -oP 'Candidate: \K[0-9]+\.[0-9]+\.[0-9]+' | head -1)
local upgradable=$(apt list --upgradable 2>/dev/null | grep -c "upgradable")
local security_updates=$(apt list --upgradable 2>/dev/null | grep -c "security")
# Helper: show the confirmation dialog.
# Returns 0 = proceed, 1 = cancelled, 2 = already up to date.
show_update_menu() {
local current_version="$1"
local target_version="$2"
local upgradable_count="$3"
local security_count="$4"
local menu_text="$(translate "System Update Information")\n\n"
menu_text+="$(translate "Current PVE Version"): $current_version\n"
if [ -n "$target_version" ] && [ "$target_version" != "$current_version" ]; then
menu_text+="$(translate "Available PVE Version"): $target_version\n"
fi
menu_text+="\n$(translate "Package Updates Available"): $upgradable_count\n"
menu_text+="$(translate "Security Updates"): $security_count\n\n"
if [ "$upgradable_count" -eq 0 ]; then
menu_text+="$(translate "System is already up to date")"
whiptail --title "$(translate "Update Status")" --msgbox "$menu_text" 15 70
return 2
else
menu_text+="$(translate "Do you want to proceed with the system update?")"
if whiptail --title "$(translate "Proxmox Update")" --yesno "$menu_text" 18 70; then
return 0
else
return 1
fi
fi
}
show_update_menu "$current_pve_version" "$available_pve_version" "$upgradable" "$security_updates"
MENU_RESULT=$?
if [[ $MENU_RESULT -eq 1 ]]; then
msg_info2 "$(translate "Update cancelled by user")"
apt-get -y autoremove > /dev/null 2>&1 || true
apt-get -y autoclean > /dev/null 2>&1 || true
return 0
elif [[ $MENU_RESULT -eq 2 ]]; then
msg_ok "$(translate "System is already up to date. No update needed.")"
apt-get -y autoremove > /dev/null 2>&1 || true
apt-get -y autoclean > /dev/null 2>&1 || true
return 0
fi
# Replace legacy NTP daemons (chrony is installed later).
msg_info "$(translate "Cleaning up unused time synchronization services...")"
if /usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' purge ntp openntpd systemd-timesyncd > /dev/null 2>&1; then
msg_ok "$(translate "Old time services removed successfully")"
else
msg_warn "$(translate "Some old time services could not be removed (not installed)")"
fi
msg_info "$(translate "Updating packages...")"
apt-get install pv -y > /dev/null 2>&1
msg_ok "$(translate "Packages updated successfully")"
# Dist-upgrade with a terminal status area (cursor saved/restored via tput).
tput sc
DEBIAN_FRONTEND=noninteractive apt-get -y \
-o Dpkg::Options::='--force-confdef' \
-o Dpkg::Options::='--force-confold' \
dist-upgrade 2>&1 | while IFS= read -r line; do
echo "$line" >> "$log_file"
if [[ "$line" =~ \[[#=\-]+\]\ *[0-9]{1,3}% ]]; then
continue
fi
if [[ "$line" =~ ^(Setting\ up|Unpacking|Preparing\ to\ unpack|Processing\ triggers\ for) ]]; then
package_name=$(echo "$line" | sed -E 's/.*(Setting up|Unpacking|Preparing to unpack|Processing triggers for) ([^ :]+).*/\2/')
[ -z "$package_name" ] && package_name="$(translate "Unknown")"
row=$(( $(tput lines) - 6 ))
tput cup $row 0; printf "%s\n" "$(translate "Installing packages...")"
tput cup $((row + 1)) 0; printf "%s\n" "──────────────────────────────────────────────"
tput cup $((row + 2)) 0; printf "%s %s\n" "$(translate "Package:")" "$package_name"
tput cup $((row + 3)) 0; printf "%s\n" "Progress: [ ] 0%"
tput cup $((row + 4)) 0; printf "%s\n" "──────────────────────────────────────────────"
# Cosmetic per-package bar: the percentages are simulated, not real
# dpkg progress.
for i in $(seq 1 10); do
sleep 0.1
progress=$((i * 10))
tput cup $((row + 3)) 9
printf "[%-50s] %3d%%" "$(printf "#%.0s" $(seq 1 $((progress/2))))" "$progress"
done
fi
done
tput rc
tput ed
# NOTE(review): PIPESTATUS is reset by each command, so after `tput rc`
# and `tput ed` this reflects `tput ed`, NOT the dist-upgrade pipeline.
# Capture ${PIPESTATUS[0]} immediately after the pipeline - confirm and fix.
upgrade_exit_code=${PIPESTATUS[0]}
if [ $upgrade_exit_code -eq 0 ]; then
msg_ok "$(translate "System upgrade completed successfully")"
else
msg_error "$(translate "System upgrade failed. Check log: $log_file")"
return 1
fi
msg_info "$(translate "Installing essential Proxmox packages...")"
local additional_packages="zfsutils-linux proxmox-backup-restore-image chrony"
if /usr/bin/env DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::='--force-confdef' install $additional_packages >> "$log_file" 2>&1; then
msg_ok "$(translate "Essential Proxmox packages installed")"
else
msg_warn "$(translate "Some essential Proxmox packages may not have been installed")"
fi
lvm_repair_check
cleanup_duplicate_repos
#msg_info "$(translate "Performing system cleanup...")"
apt-get -y autoremove > /dev/null 2>&1 || true
apt-get -y autoclean > /dev/null 2>&1 || true
msg_ok "$(translate "Cleanup finished")"
# Final summary.
local end_time=$(date +%s)
local duration=$((end_time - start_time))
local minutes=$((duration / 60))
local seconds=$((duration % 60))
echo -e "${TAB}${BGN}$(translate "====== PVE UPDATE COMPLETED ======")${CL}"
echo -e "${TAB}${GN}⏱️ $(translate "Duration")${CL}: ${BL}${minutes}m ${seconds}s${CL}"
echo -e "${TAB}${GN}📄 $(translate "Log file")${CL}: ${BL}$log_file${CL}"
echo -e "${TAB}${GN}📦 $(translate "Packages upgraded")${CL}: ${BL}$upgradable${CL}"
# NOTE(review): $target_version is local to show_update_menu and is
# undefined here; this likely should be $available_pve_version - confirm.
echo -e "${TAB}${GN}🖥️ $(translate "Proxmox VE")${CL}: ${BL}$target_version (Debian $OS_CODENAME)${CL}"
msg_ok "$(translate "Proxmox VE 9.x configuration completed.")"
}
# Execute only when run directly (sourcing just defines the functions).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
update_pve9
fi

Some files were not shown because too many files have changed in this diff Show More