diff --git a/scripts/gpu_tpu/add_gpu_lxc.sh b/scripts/gpu_tpu/add_gpu_lxc.sh index e3b397e8..9a8d21cb 100644 --- a/scripts/gpu_tpu/add_gpu_lxc.sh +++ b/scripts/gpu_tpu/add_gpu_lxc.sh @@ -799,6 +799,9 @@ _remove_gpu_blacklist() { sed -i '/^blacklist nouveau$/d' "$blacklist_file" sed -i '/^blacklist nvidia$/d' "$blacklist_file" sed -i '/^blacklist nvidiafb$/d' "$blacklist_file" + sed -i '/^blacklist nvidia_drm$/d' "$blacklist_file" + sed -i '/^blacklist nvidia_modeset$/d' "$blacklist_file" + sed -i '/^blacklist nvidia_uvm$/d' "$blacklist_file" sed -i '/^blacklist lbm-nouveau$/d' "$blacklist_file" sed -i '/^options nouveau modeset=0$/d' "$blacklist_file" ;; diff --git a/scripts/gpu_tpu/add_gpu_vm.sh b/scripts/gpu_tpu/add_gpu_vm.sh index 2ea54d77..b17e8255 100644 --- a/scripts/gpu_tpu/add_gpu_vm.sh +++ b/scripts/gpu_tpu/add_gpu_vm.sh @@ -259,9 +259,20 @@ evaluate_host_reboot_requirement() { nvidia) _file_has_exact_line "blacklist nouveau" "$blacklist_file" || needs_change=true _file_has_exact_line "blacklist nvidia" "$blacklist_file" || needs_change=true + _file_has_exact_line "blacklist nvidia_drm" "$blacklist_file" || needs_change=true + _file_has_exact_line "blacklist nvidia_modeset" "$blacklist_file" || needs_change=true + _file_has_exact_line "blacklist nvidia_uvm" "$blacklist_file" || needs_change=true _file_has_exact_line "blacklist nvidiafb" "$blacklist_file" || needs_change=true _file_has_exact_line "blacklist lbm-nouveau" "$blacklist_file" || needs_change=true _file_has_exact_line "options nouveau modeset=0" "$blacklist_file" || needs_change=true + [[ -f /etc/modules-load.d/nvidia-vfio.conf ]] && needs_change=true + grep -qE '^(nvidia|nvidia_uvm|nvidia_drm|nvidia_modeset)$' /etc/modules 2>/dev/null && needs_change=true + local svc + for svc in nvidia-persistenced.service nvidia-persistenced nvidia-powerd.service nvidia-fabricmanager.service; do + if systemctl is-active --quiet "$svc" 2>/dev/null || systemctl is-enabled --quiet "$svc" 2>/dev/null; then + 
needs_change=true + fi + done ;; amd) _file_has_exact_line "blacklist radeon" "$blacklist_file" || needs_change=true @@ -1381,6 +1392,9 @@ blacklist_gpu_drivers() { nvidia) _add_line_if_missing "blacklist nouveau" "$blacklist_file" _add_line_if_missing "blacklist nvidia" "$blacklist_file" + _add_line_if_missing "blacklist nvidia_drm" "$blacklist_file" + _add_line_if_missing "blacklist nvidia_modeset" "$blacklist_file" + _add_line_if_missing "blacklist nvidia_uvm" "$blacklist_file" _add_line_if_missing "blacklist nvidiafb" "$blacklist_file" _add_line_if_missing "blacklist lbm-nouveau" "$blacklist_file" _add_line_if_missing "options nouveau modeset=0" "$blacklist_file" @@ -1396,6 +1410,63 @@ blacklist_gpu_drivers() { msg_ok "$(translate 'GPU host driver blacklisted in /etc/modprobe.d/blacklist.conf')" | tee -a "$screen_capture" } +sanitize_nvidia_host_stack_for_vfio() { + msg_info "$(translate 'Sanitizing NVIDIA host services for VFIO mode...')" + local changed=false + local state_dir="/var/lib/proxmenux" + local state_file="${state_dir}/nvidia-host-services.state" + local svc + local -a services=( + "nvidia-persistenced.service" + "nvidia-powerd.service" + "nvidia-fabricmanager.service" + ) + + mkdir -p "$state_dir" >/dev/null 2>&1 || true + : > "$state_file" + + for svc in "${services[@]}"; do + local was_enabled=0 was_active=0 + if systemctl is-enabled --quiet "$svc" 2>/dev/null; then + was_enabled=1 + fi + if systemctl is-active --quiet "$svc" 2>/dev/null; then + was_active=1 + fi + if (( was_enabled == 1 || was_active == 1 )); then + echo "${svc} enabled=${was_enabled} active=${was_active}" >>"$state_file" + fi + + if systemctl is-active --quiet "$svc" 2>/dev/null; then + systemctl stop "$svc" >>"$LOG_FILE" 2>&1 || true + changed=true + fi + if systemctl is-enabled --quiet "$svc" 2>/dev/null; then + systemctl disable "$svc" >>"$LOG_FILE" 2>&1 || true + changed=true + fi + done + + [[ -s "$state_file" ]] || rm -f "$state_file" + + if [[ -f 
/etc/modules-load.d/nvidia-vfio.conf ]]; then + mv /etc/modules-load.d/nvidia-vfio.conf /etc/modules-load.d/nvidia-vfio.conf.proxmenux-disabled-vfio >>"$LOG_FILE" 2>&1 || true + changed=true + fi + + if grep -qE '^(nvidia|nvidia_uvm|nvidia_drm|nvidia_modeset)$' /etc/modules 2>/dev/null; then + sed -i '/^nvidia$/d;/^nvidia_uvm$/d;/^nvidia_drm$/d;/^nvidia_modeset$/d' /etc/modules + changed=true + fi + + if $changed; then + HOST_CONFIG_CHANGED=true + msg_ok "$(translate 'NVIDIA host services/autoload disabled for VFIO mode')" | tee -a "$screen_capture" + else + msg_ok "$(translate 'NVIDIA host services/autoload already aligned for VFIO mode')" | tee -a "$screen_capture" + fi +} + # ── AMD ROM dump: sysfs first, VFCT ACPI table as fallback ─────────────── _dump_rom_via_vfct() { @@ -1726,6 +1797,7 @@ main() { blacklist_gpu_drivers [[ "$SELECTED_GPU" == "amd" ]] && dump_amd_rom fi + [[ "$SELECTED_GPU" == "nvidia" ]] && sanitize_nvidia_host_stack_for_vfio cleanup_lxc_configs cleanup_vm_config ensure_vm_display_std @@ -1748,7 +1820,11 @@ main() { fi if [[ "$WIZARD_CALL" == "true" ]]; then - _set_wizard_result "applied" + if [[ "$HOST_CONFIG_CHANGED" == "true" ]]; then + _set_wizard_result "applied_reboot_required" + else + _set_wizard_result "applied" + fi rm -f "$screen_capture" return 0 fi diff --git a/scripts/gpu_tpu/switch_gpu_mode.sh b/scripts/gpu_tpu/switch_gpu_mode.sh index 02a0f7b5..dc6f808a 100755 --- a/scripts/gpu_tpu/switch_gpu_mode.sh +++ b/scripts/gpu_tpu/switch_gpu_mode.sh @@ -183,10 +183,13 @@ _remove_gpu_blacklist() { local changed=false case "$gpu_type" in nvidia) - grep -qE '^blacklist (nouveau|nvidia|nvidiafb|lbm-nouveau)$|^options nouveau modeset=0$' "$blacklist_file" 2>/dev/null && changed=true + grep -qE '^blacklist (nouveau|nvidia|nvidiafb|nvidia_drm|nvidia_modeset|nvidia_uvm|lbm-nouveau)$|^options nouveau modeset=0$' "$blacklist_file" 2>/dev/null && changed=true sed -i '/^blacklist nouveau$/d' "$blacklist_file" sed -i '/^blacklist nvidia$/d' 
"$blacklist_file" sed -i '/^blacklist nvidiafb$/d' "$blacklist_file" + sed -i '/^blacklist nvidia_drm$/d' "$blacklist_file" + sed -i '/^blacklist nvidia_modeset$/d' "$blacklist_file" + sed -i '/^blacklist nvidia_uvm$/d' "$blacklist_file" sed -i '/^blacklist lbm-nouveau$/d' "$blacklist_file" sed -i '/^options nouveau modeset=0$/d' "$blacklist_file" ;; @@ -213,6 +216,9 @@ _add_gpu_blacklist() { _add_line_if_missing "blacklist nouveau" "$blacklist_file" _add_line_if_missing "blacklist nvidia" "$blacklist_file" _add_line_if_missing "blacklist nvidiafb" "$blacklist_file" + _add_line_if_missing "blacklist nvidia_drm" "$blacklist_file" + _add_line_if_missing "blacklist nvidia_modeset" "$blacklist_file" + _add_line_if_missing "blacklist nvidia_uvm" "$blacklist_file" _add_line_if_missing "blacklist lbm-nouveau" "$blacklist_file" _add_line_if_missing "options nouveau modeset=0" "$blacklist_file" ;; @@ -226,6 +232,103 @@ _add_gpu_blacklist() { esac } +_sanitize_nvidia_host_stack_for_vfio() { + local changed=false + local state_dir="/var/lib/proxmenux" + local state_file="${state_dir}/nvidia-host-services.state" + local svc + local -a services=( + "nvidia-persistenced.service" + "nvidia-powerd.service" + "nvidia-fabricmanager.service" + ) + + mkdir -p "$state_dir" >/dev/null 2>&1 || true + : > "$state_file" + + for svc in "${services[@]}"; do + local was_enabled=0 was_active=0 + if systemctl is-enabled --quiet "$svc" 2>/dev/null; then + was_enabled=1 + fi + if systemctl is-active --quiet "$svc" 2>/dev/null; then + was_active=1 + fi + if (( was_enabled == 1 || was_active == 1 )); then + echo "${svc} enabled=${was_enabled} active=${was_active}" >>"$state_file" + fi + + if systemctl is-active --quiet "$svc" 2>/dev/null; then + systemctl stop "$svc" >>"$LOG_FILE" 2>&1 || true + changed=true + fi + if systemctl is-enabled --quiet "$svc" 2>/dev/null; then + systemctl disable "$svc" >>"$LOG_FILE" 2>&1 || true + changed=true + fi + done + + [[ -s "$state_file" ]] || rm -f 
"$state_file" + + if [[ -f /etc/modules-load.d/nvidia-vfio.conf ]]; then + mv /etc/modules-load.d/nvidia-vfio.conf /etc/modules-load.d/nvidia-vfio.conf.proxmenux-disabled-vfio >>"$LOG_FILE" 2>&1 || true + changed=true + fi + + if grep -qE '^(nvidia|nvidia_uvm|nvidia_drm|nvidia_modeset)$' /etc/modules 2>/dev/null; then + sed -i '/^nvidia$/d;/^nvidia_uvm$/d;/^nvidia_drm$/d;/^nvidia_modeset$/d' /etc/modules + changed=true + fi + + if $changed; then + HOST_CONFIG_CHANGED=true + msg_ok "$(translate 'NVIDIA host services/autoload disabled for VFIO mode')" | tee -a "$screen_capture" + else + msg_ok "$(translate 'NVIDIA host services/autoload already aligned for VFIO mode')" | tee -a "$screen_capture" + fi +} + +_restore_nvidia_host_stack_for_lxc() { + local changed=false + local state_file="/var/lib/proxmenux/nvidia-host-services.state" + local disabled_file="/etc/modules-load.d/nvidia-vfio.conf.proxmenux-disabled-vfio" + local active_file="/etc/modules-load.d/nvidia-vfio.conf" + + # Restore previous modules-load policy if ProxMenux disabled it in VM mode. + if [[ -f "$disabled_file" ]]; then + mv "$disabled_file" "$active_file" >>"$LOG_FILE" 2>&1 || true + changed=true + fi + + # Best effort: load NVIDIA kernel modules now that we are back in native mode. + # If not installed, these calls simply fail silently. 
+ modprobe nvidia >/dev/null 2>&1 || true + modprobe nvidia_uvm >/dev/null 2>&1 || true + modprobe nvidia_modeset >/dev/null 2>&1 || true + modprobe nvidia_drm >/dev/null 2>&1 || true + + if [[ -f "$state_file" ]]; then + while IFS= read -r line; do + [[ -z "$line" ]] && continue + local svc enabled active + svc=$(echo "$line" | awk '{print $1}') + enabled=$(echo "$line" | awk -F'enabled=' '{print $2}' | awk '{print $1}') + active=$(echo "$line" | awk -F'active=' '{print $2}' | awk '{print $1}') + [[ "$enabled" == "1" ]] && systemctl enable "$svc" >>"$LOG_FILE" 2>&1 || true + [[ "$active" == "1" ]] && systemctl start "$svc" >>"$LOG_FILE" 2>&1 || true + done <"$state_file" + rm -f "$state_file" + changed=true + fi + + if $changed; then + HOST_CONFIG_CHANGED=true + msg_ok "$(translate 'NVIDIA host services/autoload restored for native mode')" | tee -a "$screen_capture" + else + msg_ok "$(translate 'NVIDIA host services/autoload already aligned for native mode')" | tee -a "$screen_capture" + fi +} + _add_amd_softdep() { local vfio_conf="/etc/modprobe.d/vfio.conf" _add_line_if_missing "softdep radeon pre: vfio-pci" "$vfio_conf" @@ -777,6 +880,7 @@ switch_to_vm_mode() { _add_gpu_blacklist "$t" done msg_ok "$(translate 'GPU host driver blacklisted in /etc/modprobe.d/blacklist.conf')" | tee -a "$screen_capture" + _contains_in_array "nvidia" "${selected_types[@]}" && _sanitize_nvidia_host_stack_for_vfio _contains_in_array "amd" "${selected_types[@]}" && _add_amd_softdep if [[ "$HOST_CONFIG_CHANGED" == "true" ]]; then @@ -840,6 +944,9 @@ switch_to_lxc_mode() { if _remove_gpu_blacklist "$t"; then msg_ok "$(translate 'Driver blacklist removed for') ${t}" | tee -a "$screen_capture" fi + if [[ "$t" == "nvidia" ]]; then + _restore_nvidia_host_stack_for_lxc + fi fi done diff --git a/scripts/vm/synology.sh b/scripts/vm/synology.sh index e3bcced3..5183ad18 100644 --- a/scripts/vm/synology.sh +++ b/scripts/vm/synology.sh @@ -1456,8 +1456,23 @@ if [[ "${WIZARD_ADD_GPU:-no}" == "yes" 
]]; then replay_vm_wizard_capture fi -if [[ "${WIZARD_ADD_GPU:-no}" == "yes" && "$WIZARD_GPU_RESULT" == "applied" ]]; then +GPU_WIZARD_APPLIED="no" +GPU_WIZARD_REBOOT_REQUIRED="no" +case "${WIZARD_GPU_RESULT:-}" in + applied) + GPU_WIZARD_APPLIED="yes" + ;; + applied_reboot_required) + GPU_WIZARD_APPLIED="yes" + GPU_WIZARD_REBOOT_REQUIRED="yes" + ;; +esac + +if [[ "${WIZARD_ADD_GPU:-no}" == "yes" && "$GPU_WIZARD_APPLIED" == "yes" ]]; then msg_success "$(translate "Completed Successfully with GPU passthrough configured!")" + if [[ "$GPU_WIZARD_REBOOT_REQUIRED" == "yes" ]]; then + msg_warn "$(translate "Host VFIO configuration changed (initramfs updated). Reboot required before starting the VM.")" + fi else msg_success "$(translate "Completed Successfully!")" if [[ "${WIZARD_ADD_GPU:-no}" == "yes" && "$WIZARD_GPU_RESULT" == "no_gpu" ]]; then @@ -1473,15 +1488,27 @@ echo -e "${TAB}2. $(translate "Open the VM console and wait for the loader to bo echo -e "${TAB}3. $(translate "In the loader interface, follow the instructions to select your Synology model")" echo -e "${TAB}4. $(translate "Complete the DSM installation wizard")" echo -e "${TAB}5. $(translate "Find your device using https://finds.synology.com")" -if [[ "$WIZARD_GPU_RESULT" == "applied" ]]; then +if [[ "$GPU_WIZARD_APPLIED" == "yes" ]]; then echo -e "${TAB}- $(translate "If you want to use a physical monitor on the passthrough GPU:")" echo -e "${TAB}• $(translate "First complete DSM setup and verify Web UI/SSH access.")" echo -e "${TAB}• $(translate "Then change the VM display to none (vga: none) when the system is stable.")" fi +HOST_REBOOT_REQUIRED="no" if [[ "${VM_STORAGE_IOMMU_PENDING_REBOOT:-0}" == "1" ]]; then + HOST_REBOOT_REQUIRED="yes" msg_warn "$(translate "IOMMU was enabled during this wizard. Reboot the host to apply it.")" echo -e "${TAB}$(translate "After reboot, run: Storage -> Add Controller or NVMe PCIe to VM, and select VM") ${VMID}." 
fi +if [[ "$GPU_WIZARD_REBOOT_REQUIRED" == "yes" ]]; then + HOST_REBOOT_REQUIRED="yes" +fi +if [[ "$HOST_REBOOT_REQUIRED" == "yes" ]]; then + if whiptail --title "$(translate "Reboot Recommended")" --yesno \ +"$(translate "A host reboot is required to apply passthrough changes before starting the VM.")\n\n$(translate "Do you want to reboot now?")" 11 78; then + msg_warn "$(translate "Rebooting the system...")" + reboot + fi +fi echo -e #msg_success "$(translate "Press Enter to return to the main menu...")" diff --git a/scripts/vm/vm_creator.sh b/scripts/vm/vm_creator.sh index 22f10816..654cab16 100644 --- a/scripts/vm/vm_creator.sh +++ b/scripts/vm/vm_creator.sh @@ -704,6 +704,17 @@ fi if [[ "${WIZARD_ADD_GPU:-no}" == "yes" ]]; then WIZARD_GPU_RESULT="cancelled" run_gpu_passthrough_wizard + GPU_WIZARD_APPLIED="no" + GPU_WIZARD_REBOOT_REQUIRED="no" + case "$WIZARD_GPU_RESULT" in + applied) + GPU_WIZARD_APPLIED="yes" + ;; + applied_reboot_required) + GPU_WIZARD_APPLIED="yes" + GPU_WIZARD_REBOOT_REQUIRED="yes" + ;; + esac if [[ "${VM_WIZARD_CAPTURE_ACTIVE:-0}" -eq 1 ]]; then stop_spinner exec 1>&8 @@ -714,8 +725,11 @@ if [[ "${WIZARD_ADD_GPU:-no}" == "yes" ]]; then rm -f "$VM_WIZARD_CAPTURE_FILE" VM_WIZARD_CAPTURE_FILE="" fi - if [[ "$WIZARD_GPU_RESULT" == "applied" ]]; then + if [[ "$GPU_WIZARD_APPLIED" == "yes" ]]; then msg_success "$(translate "VM creation completed with GPU passthrough configured.")" + if [[ "$GPU_WIZARD_REBOOT_REQUIRED" == "yes" ]]; then + msg_warn "$(translate "Host VFIO configuration changed (initramfs updated). Reboot required before starting the VM.")" + fi elif [[ "$WIZARD_GPU_RESULT" == "no_gpu" ]]; then msg_success "$(translate "VM creation completed. GPU passthrough was skipped (no compatible GPU detected).")" else @@ -730,7 +744,7 @@ if [[ "${WIZARD_ADD_GPU:-no}" == "yes" ]]; then echo -e "${TAB}4. $(translate "Continue the Windows installation as usual.")" echo -e "${TAB}5. 
$(translate "Once installed, open the VirtIO ISO and run the installer to complete driver setup.")" echo -e "${TAB}6. $(translate "Reboot the VM to complete the driver installation.")" - if [[ "$WIZARD_GPU_RESULT" == "applied" ]]; then + if [[ "$GPU_WIZARD_APPLIED" == "yes" ]]; then echo -e "${TAB}- $(translate "If you want to use a physical monitor on the passthrough GPU:")" echo -e "${TAB}• $(translate "First install the GPU drivers inside the guest and verify remote access (RDP/SSH).")" echo -e "${TAB}• $(translate "Then change the VM display to none (vga: none) when the guest is stable.")" @@ -741,17 +755,29 @@ if [[ "${WIZARD_ADD_GPU:-no}" == "yes" ]]; then echo -e "${TAB}${GN}$(translate "Recommended: Install the QEMU Guest Agent in the VM")${CL}" echo -e "${TAB}$(translate "Run the following inside the VM:")" echo -e "${TAB}apt install qemu-guest-agent -y && systemctl enable --now qemu-guest-agent" - if [[ "$WIZARD_GPU_RESULT" == "applied" ]]; then + if [[ "$GPU_WIZARD_APPLIED" == "yes" ]]; then echo -e "${TAB}- $(translate "If you want to use a physical monitor on the passthrough GPU:")" echo -e "${TAB}• $(translate "First install the GPU drivers inside the guest and verify remote access (RDP/SSH).")" echo -e "${TAB}• $(translate "Then change the VM display to none (vga: none) when the guest is stable.")" fi echo -e fi + HOST_REBOOT_REQUIRED="no" if [[ "${VM_STORAGE_IOMMU_PENDING_REBOOT:-0}" == "1" ]]; then + HOST_REBOOT_REQUIRED="yes" msg_warn "$(translate "IOMMU was enabled during this wizard. Reboot the host to apply it.")" echo -e "${TAB}$(translate "After reboot, run: Storage -> Add Controller or NVMe PCIe to VM, and select VM") ${VMID}." 
fi + if [[ "$GPU_WIZARD_REBOOT_REQUIRED" == "yes" ]]; then + HOST_REBOOT_REQUIRED="yes" + fi + if [[ "$HOST_REBOOT_REQUIRED" == "yes" ]]; then + if whiptail --title "$(translate "Reboot Recommended")" --yesno \ +"$(translate "A host reboot is required to apply passthrough changes before starting the VM.")\n\n$(translate "Do you want to reboot now?")" 11 78; then + msg_warn "$(translate "Rebooting the system...")" + reboot + fi + fi msg_success "$(translate "Press Enter to return to the main menu...")" read -r bash "$LOCAL_SCRIPTS/menus/create_vm_menu.sh" diff --git a/scripts/vm/zimaos.sh b/scripts/vm/zimaos.sh index a1cd8543..ff3c6fc7 100644 --- a/scripts/vm/zimaos.sh +++ b/scripts/vm/zimaos.sh @@ -1483,8 +1483,23 @@ else replay_vm_wizard_capture fi - if [[ "${WIZARD_ADD_GPU:-no}" == "yes" && "$WIZARD_GPU_RESULT" == "applied" ]]; then + GPU_WIZARD_APPLIED="no" + GPU_WIZARD_REBOOT_REQUIRED="no" + case "${WIZARD_GPU_RESULT:-}" in + applied) + GPU_WIZARD_APPLIED="yes" + ;; + applied_reboot_required) + GPU_WIZARD_APPLIED="yes" + GPU_WIZARD_REBOOT_REQUIRED="yes" + ;; + esac + + if [[ "${WIZARD_ADD_GPU:-no}" == "yes" && "$GPU_WIZARD_APPLIED" == "yes" ]]; then msg_success "$(translate "Completed Successfully with GPU passthrough configured!")" + if [[ "$GPU_WIZARD_REBOOT_REQUIRED" == "yes" ]]; then + msg_warn "$(translate "Host VFIO configuration changed (initramfs updated). Reboot required before starting the VM.")" + fi else msg_success "$(translate "Completed Successfully!")" if [[ "${WIZARD_ADD_GPU:-no}" == "yes" && "$WIZARD_GPU_RESULT" == "no_gpu" ]]; then @@ -1498,15 +1513,27 @@ else echo -e "${TAB}1. $(translate "Start the VM")" echo -e "${TAB}2. $(translate "Open the VM console and wait for the installer to boot")" echo -e "${TAB}3. 
$(translate "Complete the ZimaOS installation wizard")" - if [[ "$WIZARD_GPU_RESULT" == "applied" ]]; then + if [[ "$GPU_WIZARD_APPLIED" == "yes" ]]; then echo -e "${TAB}- $(translate "If you want to use a physical monitor on the passthrough GPU:")" echo -e "${TAB}• $(translate "First complete ZimaOS setup and verify remote access (web/SSH).")" echo -e "${TAB}• $(translate "Then change the VM display to none (vga: none) when the system is stable.")" fi + HOST_REBOOT_REQUIRED="no" if [[ "${VM_STORAGE_IOMMU_PENDING_REBOOT:-0}" == "1" ]]; then + HOST_REBOOT_REQUIRED="yes" msg_warn "$(translate "IOMMU was enabled during this wizard. Reboot the host to apply it.")" echo -e "${TAB}$(translate "After reboot, run: Storage -> Add Controller or NVMe PCIe to VM, and select VM") ${VMID}." fi + if [[ "$GPU_WIZARD_REBOOT_REQUIRED" == "yes" ]]; then + HOST_REBOOT_REQUIRED="yes" + fi + if [[ "$HOST_REBOOT_REQUIRED" == "yes" ]]; then + if whiptail --title "$(translate "Reboot Recommended")" --yesno \ +"$(translate "A host reboot is required to apply passthrough changes before starting the VM.")\n\n$(translate "Do you want to reboot now?")" 11 78; then + msg_warn "$(translate "Rebooting the system...")" + reboot + fi + fi echo -e