commit 1c4449fd60afce40b83688c2ea1dd137d6ba743e Author: martin Date: Mon Mar 16 14:28:27 2026 +0100 🔖 snapshot: Stažení Ubuntu LXC skriptů z community-scripts/ProxmoxVE Zdrojové súbory: - ct/ubuntu.sh - hlavní vstupní skript - install/ubuntu-install.sh - instalačný skript pre kontajner - misc/build.func - build knihovňa (5780 riadkov) - misc/install.func - inštalačné funkcie (332 riadkov) - misc/core.func - základné utility (1719 riadkov) - misc/error_handler.func - spracovanie chýb (638 riadkov) - misc/tools.func - pomocné nástroje (8395 riadkov) - misc/api.func - telemetria (1470 riadkov) Zdroj: https://github.com/community-scripts/ProxmoxVE Co-Authored-By: Claude Opus 4.6 (1M context) diff --git a/ct/ubuntu.sh b/ct/ubuntu.sh new file mode 100644 index 0000000..9314cbe --- /dev/null +++ b/ct/ubuntu.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 tteck +# Author: tteck (tteckster) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://ubuntu.com/ + +APP="Ubuntu" +var_tags="${var_tags:-os}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-ubuntu}" +var_version="${var_version:-24.04}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /var ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + msg_info "Updating ${APP} LXC" + $STD apt-get update + $STD apt-get -y upgrade + msg_ok "Updated ${APP} LXC" + msg_ok "Updated successfully!" 
+ exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" diff --git a/install/ubuntu-install.sh b/install/ubuntu-install.sh new file mode 100644 index 0000000..101462b --- /dev/null +++ b/install/ubuntu-install.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 tteck +# Author: tteck (tteckster) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://ubuntu.com/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +motd_ssh +customize +cleanup_lxc diff --git a/misc/api.func b/misc/api.func new file mode 100644 index 0000000..e29975e --- /dev/null +++ b/misc/api.func @@ -0,0 +1,1470 @@ +# Copyright (c) 2021-2026 community-scripts ORG +# Author: michelroegl-brunner | MickLesk +# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE + +# ============================================================================== +# API.FUNC - TELEMETRY & DIAGNOSTICS API +# ============================================================================== +# +# Provides functions for sending anonymous telemetry data via the community +# telemetry ingest service at telemetry.community-scripts.org. 
+# +# Features: +# - Container/VM creation statistics +# - Installation success/failure tracking +# - Error code mapping and reporting +# - Privacy-respecting anonymous telemetry +# +# Usage: +# source <(curl -fsSL .../api.func) +# post_to_api # Report LXC container creation +# post_to_api_vm # Report VM creation +# post_update_to_api # Report installation status +# +# Privacy: +# - Only anonymous statistics (no personal data) +# - User can opt-out via DIAGNOSTICS=no +# - Random UUID for session tracking only +# - Data retention: 30 days +# +# ============================================================================== + +# ============================================================================== +# Telemetry Configuration +# ============================================================================== +TELEMETRY_URL="https://telemetry.community-scripts.org/telemetry" + +# Timeout for telemetry requests (seconds) +# Progress pings (validation/configuring) use the short timeout +TELEMETRY_TIMEOUT=5 +# Final status updates (success/failed) use the longer timeout +# PocketBase may need more time under load (FindRecord + UpdateRecord) +STATUS_TIMEOUT=10 + +# ============================================================================== +# SECTION 0: REPOSITORY SOURCE DETECTION +# ============================================================================== + +# ------------------------------------------------------------------------------ +# detect_repo_source() +# +# - Dynamically detects which GitHub/Gitea repo the scripts were loaded from +# - Inspects /proc/$$/cmdline and $0 to find the source URL +# - Maps detected repo to one of three canonical values: +# * "ProxmoxVE" — official community-scripts/ProxmoxVE (production) +# * "ProxmoxVED" — official community-scripts/ProxmoxVED (development) +# * "external" — any fork or unknown source +# - Fallback: "ProxmoxVED" (CI sed transforms ProxmoxVED → ProxmoxVE on promotion) +# - Sets and exports REPO_SOURCE 
#   global variable
# - Skips detection if REPO_SOURCE is already set (e.g., by environment)
# ------------------------------------------------------------------------------
detect_repo_source() {
  # Allow explicit override via environment
  [[ -n "${REPO_SOURCE:-}" ]] && return 0

  local content="" owner_repo=""

  # Method 1: Read from /proc/$$/cmdline
  # When invoked via: bash -c "$(curl -fsSL https://.../ct/app.sh)"
  # the full CT/VM script content is in /proc/$$/cmdline (same PID through source chain)
  if [[ -r /proc/$$/cmdline ]]; then
    # NUL-separated argv -> space-separated string; best effort, errors ignored
    content=$(tr '\0' ' ' </proc/$$/cmdline 2>/dev/null) || true
  fi

  # Method 2: Read from the original script file (bash ct/app.sh / bash vm/app.sh)
  if [[ -z "$content" ]] || ! echo "$content" | grep -qE 'githubusercontent\.com|community-scripts\.org' 2>/dev/null; then
    if [[ -f "$0" ]] && [[ "$0" != *bash* ]]; then
      content=$(head -10 "$0" 2>/dev/null) || true
    fi
  fi

  # Extract owner/repo from URL patterns found in the script content
  if [[ -n "$content" ]]; then
    # GitHub raw URL: raw.githubusercontent.com/OWNER/REPO/...
    owner_repo=$(echo "$content" | grep -oE 'raw\.githubusercontent\.com/[^/]+/[^/]+' | head -1 | sed 's|raw\.githubusercontent\.com/||') || true

    # Gitea URL: git.community-scripts.org/OWNER/REPO/...
    if [[ -z "$owner_repo" ]]; then
      owner_repo=$(echo "$content" | grep -oE 'git\.community-scripts\.org/[^/]+/[^/]+' | head -1 | sed 's|git\.community-scripts\.org/||') || true
    fi
  fi

  # Map detected owner/repo to canonical repo_source value
  case "$owner_repo" in
  community-scripts/ProxmoxVE) REPO_SOURCE="ProxmoxVE" ;;
  community-scripts/ProxmoxVED) REPO_SOURCE="ProxmoxVED" ;;
  "")
    # No URL detected — use hardcoded fallback
    # This value must match the repo: ProxmoxVE for production, ProxmoxVED for dev
    REPO_SOURCE="ProxmoxVE"
    ;;
  *)
    # Fork or unknown repo
    REPO_SOURCE="external"
    ;;
  esac

  export REPO_SOURCE
}

# Run detection immediately when api.func is sourced
detect_repo_source

# ==============================================================================
# SECTION 1: ERROR CODE DESCRIPTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# explain_exit_code()
#
# - Maps numeric exit codes to human-readable error descriptions
# - Canonical source of truth for ALL exit code mappings
# - Used by both api.func (telemetry) and error_handler.func (error display)
# - Supports generic/shell, curl/wget, package manager, validation, BSD sysexits,
#   systemd, Python, PostgreSQL, MySQL/MariaDB, MongoDB, Proxmox custom,
#   tools/addons, Node.js/npm, and application install/update error ranges
# - Returns description string for given exit code
#
------------------------------------------------------------------------------ +explain_exit_code() { + local code="$1" + case "$code" in + # --- Generic / Shell --- + 1) echo "General error / Operation not permitted" ;; + 2) echo "Misuse of shell builtins (e.g. syntax error)" ;; + 3) echo "General syntax or argument error" ;; + 10) echo "Docker / privileged mode required (unsupported environment)" ;; + + # --- curl / wget errors (commonly seen in downloads) --- + 4) echo "curl: Feature not supported or protocol error" ;; + 5) echo "curl: Could not resolve proxy" ;; + 6) echo "curl: DNS resolution failed (could not resolve host)" ;; + 7) echo "curl: Failed to connect (network unreachable / host down)" ;; + 8) echo "curl: Server reply error (FTP/SFTP or apk untrusted key)" ;; + 16) echo "curl: HTTP/2 framing layer error" ;; + 18) echo "curl: Partial file (transfer not completed)" ;; + 22) echo "curl: HTTP error returned (404, 429, 500+)" ;; + 23) echo "curl: Write error (disk full or permissions)" ;; + 24) echo "curl: Write to local file failed" ;; + 25) echo "curl: Upload failed" ;; + 26) echo "curl: Read error on local file (I/O)" ;; + 27) echo "curl: Out of memory (memory allocation failed)" ;; + 28) echo "curl: Operation timeout (network slow or server not responding)" ;; + 30) echo "curl: FTP port command failed" ;; + 32) echo "curl: FTP SIZE command failed" ;; + 33) echo "curl: HTTP range error" ;; + 34) echo "curl: HTTP post error" ;; + 35) echo "curl: SSL/TLS handshake failed (certificate error)" ;; + 36) echo "curl: FTP bad download resume" ;; + 39) echo "curl: LDAP search failed" ;; + 44) echo "curl: Internal error (bad function call order)" ;; + 45) echo "curl: Interface error (failed to bind to specified interface)" ;; + 46) echo "curl: Bad password entered" ;; + 47) echo "curl: Too many redirects" ;; + 48) echo "curl: Unknown command line option specified" ;; + 51) echo "curl: SSL peer certificate or SSH host key verification failed" ;; + 52) echo 
"curl: Empty reply from server (got nothing)" ;; + 55) echo "curl: Failed sending network data" ;; + 56) echo "curl: Receive error (connection reset by peer)" ;; + 57) echo "curl: Unrecoverable poll/select error (system I/O failure)" ;; + 59) echo "curl: Couldn't use specified SSL cipher" ;; + 61) echo "curl: Bad/unrecognized transfer encoding" ;; + 63) echo "curl: Maximum file size exceeded" ;; + 75) echo "Temporary failure (retry later)" ;; + 78) echo "curl: Remote file not found (404 on FTP/file)" ;; + 79) echo "curl: SSH session error (key exchange/auth failed)" ;; + 92) echo "curl: HTTP/2 stream error (protocol violation)" ;; + 95) echo "curl: HTTP/3 layer error" ;; + + # --- Package manager / APT / DPKG --- + 100) echo "APT: Package manager error (broken packages / dependency problems)" ;; + 101) echo "APT: Configuration error (bad sources.list, malformed config)" ;; + 102) echo "APT: Lock held by another process (dpkg/apt still running)" ;; + + # --- Script Validation & Setup (103-123) --- + 103) echo "Validation: Shell is not Bash" ;; + 104) echo "Validation: Not running as root (or invoked via sudo)" ;; + 105) echo "Validation: Proxmox VE version not supported" ;; + 106) echo "Validation: Architecture not supported (ARM / PiMox)" ;; + 107) echo "Validation: Kernel key parameters unreadable" ;; + 108) echo "Validation: Kernel key limits exceeded" ;; + 109) echo "Proxmox: No available container ID after max attempts" ;; + 110) echo "Proxmox: Failed to apply default.vars" ;; + 111) echo "Proxmox: App defaults file not available" ;; + 112) echo "Proxmox: Invalid install menu option" ;; + 113) echo "LXC: Under-provisioned — user aborted update" ;; + 114) echo "LXC: Storage too low — user aborted update" ;; + 115) echo "Download: install.func download failed or incomplete" ;; + 116) echo "Proxmox: Default bridge vmbr0 not found" ;; + 117) echo "LXC: Container did not reach running state" ;; + 118) echo "LXC: No IP assigned to container after timeout" ;; + 119) 
echo "Proxmox: No valid storage for rootdir content" ;; + 120) echo "Proxmox: No valid storage for vztmpl content" ;; + 121) echo "LXC: Container network not ready (no IP after retries)" ;; + 122) echo "LXC: No internet connectivity — user declined to continue" ;; + 123) echo "LXC: Local IP detection failed" ;; + + # --- BSD sysexits.h (64-78) --- + 64) echo "Usage error (wrong arguments)" ;; + 65) echo "Data format error (bad input data)" ;; + 66) echo "Input file not found (cannot open input)" ;; + 67) echo "User not found (addressee unknown)" ;; + 68) echo "Host not found (hostname unknown)" ;; + 69) echo "Service unavailable" ;; + 70) echo "Internal software error" ;; + 71) echo "System error (OS-level failure)" ;; + 72) echo "Critical OS file missing" ;; + 73) echo "Cannot create output file" ;; + 74) echo "I/O error" ;; + 76) echo "Remote protocol error" ;; + 77) echo "Permission denied" ;; + + # --- Common shell/system errors --- + 124) echo "Command timed out (timeout command)" ;; + 125) echo "Command failed to start (Docker daemon or execution error)" ;; + 126) echo "Command invoked cannot execute (permission problem?)" ;; + 127) echo "Command not found" ;; + 128) echo "Invalid argument to exit" ;; + 129) echo "Killed by SIGHUP (terminal closed / hangup)" ;; + 130) echo "Aborted by user (SIGINT)" ;; + 131) echo "Killed by SIGQUIT (core dumped)" ;; + 132) echo "Killed by SIGILL (illegal CPU instruction)" ;; + 134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;; + 137) echo "Killed (SIGKILL / Out of memory?)" ;; + 139) echo "Segmentation fault (core dumped)" ;; + 141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;; + 143) echo "Terminated (SIGTERM)" ;; + 144) echo "Killed by signal 16 (SIGUSR1 / SIGSTKFLT)" ;; + 146) echo "Killed by signal 18 (SIGTSTP)" ;; + + # --- Systemd / Service errors (150-154) --- + 150) echo "Systemd: Service failed to start" ;; + 151) echo "Systemd: Service unit not found" ;; + 152) echo "Permission 
denied (EACCES)" ;; + 153) echo "Build/compile failed (make/gcc/cmake)" ;; + 154) echo "Node.js: Native addon build failed (node-gyp)" ;; + # --- Python / pip / uv (160-162) --- + 160) echo "Python: Virtualenv / uv environment missing or broken" ;; + 161) echo "Python: Dependency resolution failed" ;; + 162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;; + + # --- PostgreSQL (170-173) --- + 170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;; + 171) echo "PostgreSQL: Authentication failed (bad user/password)" ;; + 172) echo "PostgreSQL: Database does not exist" ;; + 173) echo "PostgreSQL: Fatal error in query / syntax" ;; + + # --- MySQL / MariaDB (180-183) --- + 180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; + 181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; + 182) echo "MySQL/MariaDB: Database does not exist" ;; + 183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; + + # --- MongoDB (190-193) --- + 190) echo "MongoDB: Connection failed (server not running)" ;; + 191) echo "MongoDB: Authentication failed (bad user/password)" ;; + 192) echo "MongoDB: Database not found" ;; + 193) echo "MongoDB: Fatal query error" ;; + + # --- Proxmox Custom Codes (200-231) --- + 200) echo "Proxmox: Failed to create lock file" ;; + 203) echo "Proxmox: Missing CTID variable" ;; + 204) echo "Proxmox: Missing PCT_OSTYPE variable" ;; + 205) echo "Proxmox: Invalid CTID (<100)" ;; + 206) echo "Proxmox: CTID already in use" ;; + 207) echo "Proxmox: Password contains unescaped special characters" ;; + 208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;; + 209) echo "Proxmox: Container creation failed" ;; + 210) echo "Proxmox: Cluster not quorate" ;; + 211) echo "Proxmox: Timeout waiting for template lock" ;; + 212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;; + 213) echo "Proxmox: Storage type does not support 
'rootdir' content" ;; + 214) echo "Proxmox: Not enough storage space" ;; + 215) echo "Proxmox: Container created but not listed (ghost state)" ;; + 216) echo "Proxmox: RootFS entry missing in config" ;; + 217) echo "Proxmox: Storage not accessible" ;; + 218) echo "Proxmox: Template file corrupted or incomplete" ;; + 219) echo "Proxmox: CephFS does not support containers - use RBD" ;; + 220) echo "Proxmox: Unable to resolve template path" ;; + 221) echo "Proxmox: Template file not readable" ;; + 222) echo "Proxmox: Template download failed" ;; + 223) echo "Proxmox: Template not available after download" ;; + 224) echo "Proxmox: PBS storage is for backups only" ;; + 225) echo "Proxmox: No template available for OS/Version" ;; + 226) echo "Proxmox: VM disk import or post-creation setup failed" ;; + 231) echo "Proxmox: LXC stack upgrade failed" ;; + + # --- Tools & Addon Scripts (232-238) --- + 232) echo "Tools: Wrong execution environment (run on PVE host, not inside LXC)" ;; + 233) echo "Tools: Application not installed (update prerequisite missing)" ;; + 234) echo "Tools: No LXC containers found or available" ;; + 235) echo "Tools: Backup or restore operation failed" ;; + 236) echo "Tools: Required hardware not detected" ;; + 237) echo "Tools: Dependency package installation failed" ;; + 238) echo "Tools: OS or distribution not supported for this addon" ;; + + # --- Node.js / npm / pnpm / yarn (239-249) --- + 239) echo "npm/Node.js: Unexpected runtime error or dependency failure" ;; + 243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;; + 245) echo "Node.js: Invalid command-line option" ;; + 246) echo "Node.js: Internal JavaScript Parse Error" ;; + 247) echo "Node.js: Fatal internal error" ;; + 248) echo "Node.js: Invalid C++ addon / N-API failure" ;; + 249) echo "npm/pnpm/yarn: Unknown fatal error" ;; + + # --- Application Install/Update Errors (250-254) --- + 250) echo "App: Download failed or version not determined" ;; + 251) echo "App: File 
extraction failed (corrupt or incomplete archive)" ;; + 252) echo "App: Required file or resource not found" ;; + 253) echo "App: Data migration required — update aborted" ;; + 254) echo "App: User declined prompt or input timed out" ;; + + # --- DPKG --- + 255) echo "DPKG: Fatal internal error" ;; + + # --- Default --- + *) echo "Unknown error" ;; + esac +} + +# ------------------------------------------------------------------------------ +# json_escape() +# +# - Escapes a string for safe JSON embedding +# - Strips ANSI escape sequences and non-printable control characters +# - Handles backslashes, quotes, newlines, tabs, and carriage returns +# ------------------------------------------------------------------------------ +json_escape() { + # Escape a string for safe JSON embedding using awk (handles any input size). + # Pipeline: strip ANSI → remove control chars → escape \ " TAB → join lines with \n + printf '%s' "$1" \ + | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' \ + | tr -d '\000-\010\013\014\016-\037\177\r' \ + | awk ' + BEGIN { ORS = "" } + { + gsub(/\\/, "\\\\") # backslash → \\ + gsub(/"/, "\\\"") # double quote → \" + gsub(/\t/, "\\t") # tab → \t + if (NR > 1) printf "\\n" + printf "%s", $0 + }' +} + +# ------------------------------------------------------------------------------ +# get_error_text() +# +# - Returns last 20 lines of the active log (INSTALL_LOG or BUILD_LOG) +# - Falls back to combined log or BUILD_LOG if primary is not accessible +# - Handles container paths that don't exist on the host +# ------------------------------------------------------------------------------ +get_error_text() { + local logfile="" + if declare -f get_active_logfile >/dev/null 2>&1; then + logfile=$(get_active_logfile) + elif [[ -n "${INSTALL_LOG:-}" ]]; then + logfile="$INSTALL_LOG" + elif [[ -n "${BUILD_LOG:-}" ]]; then + logfile="$BUILD_LOG" + fi + + # If logfile is inside container (e.g. /root/.install-*), try the host copy + if [[ -n "$logfile" && ! 
-s "$logfile" ]]; then + # Try combined log: /tmp/--.log + if [[ -n "${CTID:-}" && -n "${SESSION_ID:-}" ]]; then + local combined_log="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log" + if [[ -s "$combined_log" ]]; then + logfile="$combined_log" + fi + fi + fi + + # Also try BUILD_LOG as fallback if primary log is empty/missing + if [[ -z "$logfile" || ! -s "$logfile" ]] && [[ -n "${BUILD_LOG:-}" && -s "${BUILD_LOG}" ]]; then + logfile="$BUILD_LOG" + fi + + # Try SILENT_LOGFILE as last resort (captures $STD command output) + if [[ -z "$logfile" || ! -s "$logfile" ]] && [[ -n "${SILENT_LOGFILE:-}" && -s "${SILENT_LOGFILE}" ]]; then + logfile="$SILENT_LOGFILE" + fi + + if [[ -n "$logfile" && -s "$logfile" ]]; then + tail -n 20 "$logfile" 2>/dev/null | sed 's/\r$//' | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' + fi +} + +# ------------------------------------------------------------------------------ +# get_full_log() +# +# - Returns the FULL installation log (build + install combined) +# - Calls ensure_log_on_host() to pull container log if needed +# - Strips ANSI escape codes and carriage returns +# - Truncates to max_bytes (default: 120KB) to stay within API limits +# - Used for the error telemetry field (full trace instead of 20 lines) +# ------------------------------------------------------------------------------ +get_full_log() { + local max_bytes="${1:-122880}" # 120KB default + local logfile="" + + # Ensure logs are available on host (pulls from container if needed) + if declare -f ensure_log_on_host >/dev/null 2>&1; then + ensure_log_on_host + fi + + # Try combined log first (most complete) + if [[ -n "${CTID:-}" && -n "${SESSION_ID:-}" ]]; then + local combined_log="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log" + if [[ -s "$combined_log" ]]; then + logfile="$combined_log" + fi + fi + + # Fall back to INSTALL_LOG + if [[ -z "$logfile" || ! 
-s "$logfile" ]]; then + if [[ -n "${INSTALL_LOG:-}" && -s "${INSTALL_LOG}" ]]; then + logfile="$INSTALL_LOG" + fi + fi + + # Fall back to BUILD_LOG + if [[ -z "$logfile" || ! -s "$logfile" ]]; then + if [[ -n "${BUILD_LOG:-}" && -s "${BUILD_LOG}" ]]; then + logfile="$BUILD_LOG" + fi + fi + + # Fall back to SILENT_LOGFILE (captures $STD command output) + if [[ -z "$logfile" || ! -s "$logfile" ]]; then + if [[ -n "${SILENT_LOGFILE:-}" && -s "${SILENT_LOGFILE}" ]]; then + logfile="$SILENT_LOGFILE" + fi + fi + + if [[ -n "$logfile" && -s "$logfile" ]]; then + # Strip ANSI codes, carriage returns, and anonymize IP addresses (GDPR) + sed 's/\r$//' "$logfile" 2>/dev/null | + sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' | + sed -E 's/([0-9]{1,3}\.)[0-9]{1,3}\.[0-9]{1,3}/\1x.x/g' | + head -c "$max_bytes" + fi +} + +# ------------------------------------------------------------------------------ +# build_error_string() +# +# - Builds a structured error string for telemetry reporting +# - Format: "exit_code= | \n---\n" +# - If no log lines available, returns just the explanation +# - Arguments: +# * $1: exit_code (numeric) +# * $2: log_text (optional, output from get_error_text) +# - Returns structured error string via stdout +# ------------------------------------------------------------------------------ +build_error_string() { + local exit_code="${1:-1}" + local log_text="${2:-}" + local explanation + explanation=$(explain_exit_code "$exit_code") + + if [[ -n "$log_text" ]]; then + # Structured format: header + separator + log lines + printf 'exit_code=%s | %s\n---\n%s' "$exit_code" "$explanation" "$log_text" + else + # No log available - just the explanation with exit code + printf 'exit_code=%s | %s' "$exit_code" "$explanation" + fi +} + +# ============================================================================== +# SECTION 2: TELEMETRY FUNCTIONS +# ============================================================================== + +# 
------------------------------------------------------------------------------ +# detect_gpu() +# +# - Detects GPU vendor, model, and passthrough type +# - Sets GPU_VENDOR, GPU_MODEL, and GPU_PASSTHROUGH globals +# - Used for GPU analytics +# ------------------------------------------------------------------------------ +detect_gpu() { + GPU_VENDOR="unknown" + GPU_MODEL="" + GPU_PASSTHROUGH="unknown" + + local gpu_line + gpu_line=$(lspci 2>/dev/null | grep -iE "VGA|3D|Display" | head -1) + + if [[ -n "$gpu_line" ]]; then + # Extract model: everything after the colon, clean up + GPU_MODEL=$(echo "$gpu_line" | sed 's/.*: //' | sed 's/ (rev .*)$//' | cut -c1-64) + + # Detect vendor and passthrough type + if echo "$gpu_line" | grep -qi "Intel"; then + GPU_VENDOR="intel" + GPU_PASSTHROUGH="igpu" + elif echo "$gpu_line" | grep -qi "AMD\|ATI"; then + GPU_VENDOR="amd" + if echo "$gpu_line" | grep -qi "Radeon RX\|Radeon Pro"; then + GPU_PASSTHROUGH="dgpu" + else + GPU_PASSTHROUGH="igpu" + fi + elif echo "$gpu_line" | grep -qi "NVIDIA"; then + GPU_VENDOR="nvidia" + GPU_PASSTHROUGH="dgpu" + fi + fi + + export GPU_VENDOR GPU_MODEL GPU_PASSTHROUGH +} + +# ------------------------------------------------------------------------------ +# detect_cpu() +# +# - Detects CPU vendor and model +# - Sets CPU_VENDOR (intel/amd/arm/unknown) and CPU_MODEL globals +# - Used for CPU analytics +# ------------------------------------------------------------------------------ +detect_cpu() { + CPU_VENDOR="unknown" + CPU_MODEL="" + + if [[ -f /proc/cpuinfo ]]; then + local vendor_id + vendor_id=$(grep -m1 "vendor_id" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | tr -d ' ') + + case "$vendor_id" in + GenuineIntel) CPU_VENDOR="intel" ;; + AuthenticAMD) CPU_VENDOR="amd" ;; + *) + # ARM doesn't have vendor_id, check for CPU implementer + if grep -qi "CPU implementer" /proc/cpuinfo 2>/dev/null; then + CPU_VENDOR="arm" + fi + ;; + esac + + # Extract model name and clean it up + CPU_MODEL=$(grep -m1 "model 
name" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | sed 's/^ *//' | sed 's/(R)//g' | sed 's/(TM)//g' | sed 's/ */ /g' | cut -c1-64) + fi + + export CPU_VENDOR CPU_MODEL +} + +# ------------------------------------------------------------------------------ +# detect_ram() +# +# - Detects RAM speed using dmidecode +# - Sets RAM_SPEED global (e.g., "4800" for DDR5-4800) +# - Requires root access for dmidecode +# - Returns empty if not available or if speed is "Unknown" (nested VMs) +# ------------------------------------------------------------------------------ +detect_ram() { + RAM_SPEED="" + + if command -v dmidecode &>/dev/null; then + # Get configured memory speed (actual running speed) + # Use || true to handle "Unknown" values in nested VMs (no numeric match) + RAM_SPEED=$(dmidecode -t memory 2>/dev/null | grep -m1 "Configured Memory Speed:" | grep -oE "[0-9]+" | head -1) || true + + # Fallback to Speed: if Configured not available + if [[ -z "$RAM_SPEED" ]]; then + RAM_SPEED=$(dmidecode -t memory 2>/dev/null | grep -m1 "Speed:" | grep -oE "[0-9]+" | head -1) || true + fi + fi + + export RAM_SPEED +} + +# ------------------------------------------------------------------------------ +# post_to_api() +# +# - Sends LXC container creation statistics to telemetry ingest service +# - Only executes if: +# * curl is available +# * DIAGNOSTICS=yes +# * RANDOM_UUID is set +# - Payload includes: +# * Container type, disk size, CPU cores, RAM +# * OS type and version +# * Application name (NSAPP) +# * Installation method +# * PVE version +# * Status: "installing" +# * Random UUID for session tracking +# - Anonymous telemetry (no personal data) +# - Never blocks or fails script execution +# ------------------------------------------------------------------------------ +post_to_api() { + # Prevent duplicate submissions (post_to_api is called from multiple places) + [[ "${POST_TO_API_DONE:-}" == "true" ]] && return 0 + + # Silent fail - telemetry should never break scripts + 
command -v curl &>/dev/null || { + [[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] curl not found, skipping" >&2 + return 0 + } + [[ "${DIAGNOSTICS:-no}" == "no" ]] && { + [[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] DIAGNOSTICS=no, skipping" >&2 + return 0 + } + [[ -z "${RANDOM_UUID:-}" ]] && { + [[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] RANDOM_UUID empty, skipping" >&2 + return 0 + } + + [[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] post_to_api() DIAGNOSTICS=$DIAGNOSTICS RANDOM_UUID=$RANDOM_UUID NSAPP=$NSAPP" >&2 + + # Set type for later status updates + TELEMETRY_TYPE="lxc" + + local pve_version="" + if command -v pveversion &>/dev/null; then + pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true + fi + + # Detect GPU if not already set + if [[ -z "${GPU_VENDOR:-}" ]]; then + detect_gpu + fi + local gpu_vendor="${GPU_VENDOR:-unknown}" + local gpu_model + gpu_model=$(json_escape "${GPU_MODEL:-}") + local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}" + + # Detect CPU if not already set + if [[ -z "${CPU_VENDOR:-}" ]]; then + detect_cpu + fi + local cpu_vendor="${CPU_VENDOR:-unknown}" + local cpu_model + cpu_model=$(json_escape "${CPU_MODEL:-}") + + # Detect RAM if not already set + if [[ -z "${RAM_SPEED:-}" ]]; then + detect_ram + fi + local ram_speed="${RAM_SPEED:-}" + + local JSON_PAYLOAD + JSON_PAYLOAD=$( + cat <&2 + [[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] Payload: $JSON_PAYLOAD" >&2 + + # Send initial "installing" record with retry. + # This record MUST exist for all subsequent updates to succeed. 
# NOTE(review): the retry loop that opened this chunk belongs to post_to_api(),
# whose definition begins before this section of the file; it is left to the
# preceding section and intentionally not reproduced here.

# ------------------------------------------------------------------------------
# post_to_api_vm()
#
# Sends VM creation statistics to the telemetry ingest service.
# Reads DIAGNOSTICS from /usr/local/community-scripts/diagnostics; silently
# does nothing without curl, with DIAGNOSTICS=no, or without RANDOM_UUID.
# Differences from the LXC variant: type="vm", ct_type=2, disk size reported
# without the 'G' suffix. Never blocks or fails the calling script.
# ------------------------------------------------------------------------------
post_to_api_vm() {
  # Re-read the persisted opt-in flag, if present.
  if [[ -f /usr/local/community-scripts/diagnostics ]]; then
    DIAGNOSTICS=$(grep -i "^DIAGNOSTICS=" /usr/local/community-scripts/diagnostics 2>/dev/null | awk -F'=' '{print $2}') || true
  fi

  # Telemetry must never break scripts: bail out silently on any precondition.
  command -v curl &>/dev/null || return 0
  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
  [[ -z "${RANDOM_UUID:-}" ]] && return 0

  # Later status updates must know this record describes a VM.
  TELEMETRY_TYPE="vm"

  local pve_version=""
  if command -v pveversion &>/dev/null; then
    pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
  fi

  # Hardware detection, only if the detect_* helpers have not run yet.
  [[ -z "${GPU_VENDOR:-}" ]] && detect_gpu
  local gpu_vendor="${GPU_VENDOR:-unknown}"
  local gpu_model
  gpu_model=$(json_escape "${GPU_MODEL:-}")
  local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"

  [[ -z "${CPU_VENDOR:-}" ]] && detect_cpu
  local cpu_vendor="${CPU_VENDOR:-unknown}"
  local cpu_model
  cpu_model=$(json_escape "${CPU_MODEL:-}")

  [[ -z "${RAM_SPEED:-}" ]] && detect_ram
  local ram_speed="${RAM_SPEED:-}"

  # VMs set DISK_SIZE like "32G"; the API expects a bare number.
  local DISK_SIZE_API="${DISK_SIZE%G}"

  # NOTE(review): the original JSON heredoc was destroyed in extraction
  # (collapsed to "cat </dev/null"); the payload below is reconstructed from
  # the fields this function computes and the visible minimal-payload format.
  # Verify field names against upstream misc/api.func.
  local JSON_PAYLOAD
  JSON_PAYLOAD=$(cat <<EOF
{
  "random_id": "${RANDOM_UUID}",
  "execution_id": "${EXECUTION_ID:-${RANDOM_UUID}}",
  "type": "vm",
  "ct_type": 2,
  "nsapp": "${NSAPP:-unknown}",
  "method": "${METHOD:-unknown}",
  "pve_version": "${pve_version}",
  "disk_size": ${DISK_SIZE_API:-0},
  "gpu_vendor": "${gpu_vendor}",
  "gpu_model": "${gpu_model}",
  "gpu_passthrough": "${gpu_passthrough}",
  "cpu_vendor": "${cpu_vendor}",
  "cpu_model": "${cpu_model}",
  "ram_speed": "${ram_speed}",
  "status": "installing"
}
EOF
  )

  # Up to three delivery attempts, one second apart (mirrors post_to_api()).
  local http_code="" attempt
  for attempt in 1 2 3; do
    http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT:-10}" -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
      -H "Content-Type: application/json" \
      -d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
    [[ "$http_code" =~ ^2[0-9]{2}$ ]] && break
    [[ "$attempt" -lt 3 ]] && sleep 1
  done

  POST_TO_API_DONE=true
}

# ------------------------------------------------------------------------------
# post_progress_to_api()
#
# Lightweight progress ping that updates the existing telemetry record.
# $1: status (optional, default "configuring"; also used: "validation").
# Fire-and-forget; safe to call repeatedly; never blocks or fails the script.
# ------------------------------------------------------------------------------
post_progress_to_api() {
  command -v curl &>/dev/null || return 0
  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
  [[ -z "${RANDOM_UUID:-}" ]] && return 0

  local progress_status="${1:-configuring}"
  local app_name="${NSAPP:-${app:-unknown}}"
  local telemetry_type="${TELEMETRY_TYPE:-lxc}"

  curl -fsS -m 5 -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
    -H "Content-Type: application/json" \
    -d "{\"random_id\":\"${RANDOM_UUID}\",\"execution_id\":\"${EXECUTION_ID:-${RANDOM_UUID}}\",\"type\":\"${telemetry_type}\",\"nsapp\":\"${app_name}\",\"status\":\"${progress_status}\"}" &>/dev/null || true
}

# ------------------------------------------------------------------------------
# post_update_to_api()
#
# Reports final installation status ("done"/"failed") to the telemetry service.
# $1: status, $2: numeric exit code, $3: "force" to bypass the duplicate guard.
# Tries three payload sizes: full log (<=120KB), medium log (<=16KB), minimal.
# POST_UPDATE_DONE is only set on success so the EXIT trap may retry once.
# ------------------------------------------------------------------------------
post_update_to_api() {
  # Silent fail - telemetry should never break scripts.
  command -v curl &>/dev/null || return 0

  # "force" bypasses the duplicate guard for retries after cleanup.
  local force="${3:-}"
  POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}
  if [[ "$POST_UPDATE_DONE" == "true" && "$force" != "force" ]]; then
    return 0
  fi

  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
  [[ -z "${RANDOM_UUID:-}" ]] && return 0

  local status="${1:-failed}"
  local raw_exit_code="${2:-1}"
  local exit_code=0 error="" pb_status error_category=""

  # Hardware info, if the detect_* helpers ran earlier.
  local gpu_vendor="${GPU_VENDOR:-unknown}"
  local gpu_model
  gpu_model=$(json_escape "${GPU_MODEL:-}")
  local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
  local cpu_vendor="${CPU_VENDOR:-unknown}"
  local cpu_model
  cpu_model=$(json_escape "${CPU_MODEL:-}")
  local ram_speed="${RAM_SPEED:-}"

  # Map caller status to telemetry values: success, failed, unknown.
  case "$status" in
  done | success)
    pb_status="success"
    exit_code=0
    error=""
    error_category=""
    ;;
  failed)
    pb_status="failed"
    ;;
  *)
    pb_status="unknown"
    ;;
  esac

  # For failures, resolve the exit code and build three error descriptions of
  # decreasing size for the three delivery attempts below.
  local short_error="" medium_error=""
  if [[ "$pb_status" == "failed" || "$pb_status" == "unknown" ]]; then
    if [[ "$raw_exit_code" =~ ^[0-9]+$ ]]; then
      exit_code="$raw_exit_code"
    else
      exit_code=1
    fi
    local log_text=""
    log_text=$(get_full_log 122880) || true # 120KB max
    if [[ -z "$log_text" ]]; then
      log_text=$(get_error_text) # fallback: last lines only
    fi
    local full_error
    full_error=$(build_error_string "$exit_code" "$log_text")
    error=$(json_escape "$full_error")
    short_error=$(json_escape "$(explain_exit_code "$exit_code")")
    error_category=$(categorize_error "$exit_code")
    [[ -z "$error" ]] && error="Unknown error"

    # Medium variant for attempt 2: explanation + truncated log (<=16KB).
    local medium_log=""
    medium_log=$(get_full_log 16384) || true
    if [[ -z "$medium_log" ]]; then
      medium_log=$(get_error_text) || true
    fi
    local medium_full
    medium_full=$(build_error_string "$exit_code" "$medium_log")
    medium_error=$(json_escape "$medium_full")
    [[ -z "$medium_error" ]] && medium_error="$short_error"
  fi

  # Duration since start_install_timer(), if it was called.
  local duration=0
  if [[ -n "${INSTALL_START_TIME:-}" ]]; then
    duration=$(($(date +%s) - INSTALL_START_TIME))
  fi

  local pve_version=""
  if command -v pveversion &>/dev/null; then
    pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
  fi

  local http_code=""

  # Strip 'G' suffix from disk size (VMs set DISK_SIZE=32G).
  local DISK_SIZE_API="${DISK_SIZE:-0}"
  DISK_SIZE_API="${DISK_SIZE_API%G}"
  [[ ! "$DISK_SIZE_API" =~ ^[0-9]+$ ]] && DISK_SIZE_API=0

  # NOTE(review): the three payload heredocs below were destroyed in extraction
  # and are reconstructed from the fields computed above - verify upstream.

  # -- Attempt 1: full payload with complete error text (full log) --
  local JSON_PAYLOAD
  JSON_PAYLOAD=$(cat <<EOF
{
  "random_id": "${RANDOM_UUID}",
  "execution_id": "${EXECUTION_ID:-${RANDOM_UUID}}",
  "type": "${TELEMETRY_TYPE:-lxc}",
  "nsapp": "${NSAPP:-unknown}",
  "status": "${pb_status}",
  "exit_code": ${exit_code},
  "error": "${error}",
  "error_category": "${error_category}",
  "install_duration": ${duration},
  "pve_version": "${pve_version}",
  "disk_size": ${DISK_SIZE_API},
  "gpu_vendor": "${gpu_vendor}",
  "gpu_model": "${gpu_model}",
  "gpu_passthrough": "${gpu_passthrough}",
  "cpu_vendor": "${cpu_vendor}",
  "cpu_model": "${cpu_model}",
  "ram_speed": "${ram_speed}"
}
EOF
  )
  http_code=$(curl -sS -w "%{http_code}" -m "${STATUS_TIMEOUT:-10}" -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"

  if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
    POST_UPDATE_DONE=true
    return 0
  fi

  # -- Attempt 2: medium error text (<=16KB) --
  sleep 1
  local RETRY_PAYLOAD
  RETRY_PAYLOAD=$(cat <<EOF
{
  "random_id": "${RANDOM_UUID}",
  "execution_id": "${EXECUTION_ID:-${RANDOM_UUID}}",
  "type": "${TELEMETRY_TYPE:-lxc}",
  "nsapp": "${NSAPP:-unknown}",
  "status": "${pb_status}",
  "exit_code": ${exit_code},
  "error": "${medium_error}",
  "error_category": "${error_category}",
  "install_duration": ${duration}
}
EOF
  )
  http_code=$(curl -sS -w "%{http_code}" -m "${STATUS_TIMEOUT:-10}" -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
    -H "Content-Type: application/json" \
    -d "$RETRY_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"

  if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
    POST_UPDATE_DONE=true
    return 0
  fi

  # -- Attempt 3: minimal payload (bare minimum to set the record status) --
  sleep 2
  local MINIMAL_PAYLOAD
  MINIMAL_PAYLOAD="{\"random_id\":\"${RANDOM_UUID}\",\"execution_id\":\"${EXECUTION_ID:-${RANDOM_UUID}}\",\"type\":\"${TELEMETRY_TYPE:-lxc}\",\"nsapp\":\"${NSAPP:-unknown}\",\"status\":\"${pb_status}\",\"exit_code\":${exit_code},\"error\":\"${medium_error}\",\"install_duration\":${duration}}"
  http_code=$(curl -sS -w "%{http_code}" -m "${STATUS_TIMEOUT:-10}" -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
    -H "Content-Type: application/json" \
    -d "$MINIMAL_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"

  if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
    POST_UPDATE_DONE=true
    return 0
  fi

  # All 3 attempts failed - do NOT set POST_UPDATE_DONE=true.
  # This allows the EXIT trap (on_exit in error_handler.func) to retry.
  # No infinite loop risk: the EXIT trap fires exactly once.
}

# ==============================================================================
# SECTION 3: EXTENDED TELEMETRY FUNCTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# categorize_error()
#
# Maps an exit code to a coarse error category used to group failures in the
# analytics dashboard. Always prints exactly one category name.
# ------------------------------------------------------------------------------
categorize_error() {
  local code="$1"
  case "$code" in
  # Network errors (curl/wget)
  6 | 7 | 22 | 35) echo "network" ;;

  # Docker / privileged mode required
  10) echo "config" ;;

  # Timeout errors
  28 | 124 | 211) echo "timeout" ;;

  # Storage errors (Proxmox storage)
  214 | 217 | 219 | 224) echo "storage" ;;

  # Dependency/package errors (APT, DPKG, pip, missing commands)
  100 | 101 | 102 | 127 | 160 | 161 | 162 | 255) echo "dependency" ;;

  # Permission errors
  126 | 152) echo "permission" ;;

  # Configuration errors (Proxmox config, invalid args)
  128 | 203 | 204 | 205 | 206 | 207 | 208) echo "config" ;;

  # Proxmox container/template errors
  200 | 209 | 210 | 212 | 213 | 215 | 216 | 218 | 220 | 221 | 222 | 223 | 225 | 231) echo "proxmox" ;;

  # Service/systemd errors
  150 | 151 | 153 | 154) echo "service" ;;

  # Database errors (PostgreSQL, MySQL, MongoDB)
  170 | 171 | 172 | 173 | 180 | 181 | 182 | 183 | 190 | 191 | 192 | 193) echo "database" ;;

  # Node.js / JavaScript runtime errors
  243 | 245 | 246 | 247 | 248 | 249) echo "runtime" ;;

  # Aborted by user (SIGHUP=terminal closed, SIGINT=Ctrl+C, SIGTERM=killed)
  129 | 130 | 143) echo "user_aborted" ;;

  # Resource errors (OOM/SIGKILL, SIGABRT)
  134 | 137) echo "resource" ;;

  # Signal/process errors (SIGPIPE, SIGSEGV)
  139 | 141) echo "signal" ;;

  # Shell errors (general error, syntax error)
  1 | 2) echo "shell" ;;

  # Default - truly unknown
  *) echo "unknown" ;;
  esac
}
# ------------------------------------------------------------------------------
# start_install_timer()
#
# Captures the start time for installation duration tracking; call at the
# beginning of an installation. Sets and exports INSTALL_START_TIME.
# ------------------------------------------------------------------------------
start_install_timer() {
  INSTALL_START_TIME=$(date +%s)
  export INSTALL_START_TIME
}

# ------------------------------------------------------------------------------
# get_install_duration()
#
# Prints elapsed whole seconds since start_install_timer() was called, or 0 if
# the timer was never started.
# ------------------------------------------------------------------------------
get_install_duration() {
  if [[ -z "${INSTALL_START_TIME:-}" ]]; then
    echo "0"
    return
  fi
  local now
  now=$(date +%s)
  echo $((now - INSTALL_START_TIME))
}

# ------------------------------------------------------------------------------
# _telemetry_report_exit()
#
# Internal EXIT-trap handler installed by init_tool_telemetry(). Derives
# success/failure from the script's exit code and reports via the matching API.
# $1: exit code of the terminating script.
# ------------------------------------------------------------------------------
_telemetry_report_exit() {
  local ec="${1:-0}"
  local status="success"
  [[ "$ec" -ne 0 ]] && status="failed"

  # Lazy name resolution: explicit name, then $APP, then "unknown".
  local name="${TELEMETRY_TOOL_NAME:-${APP:-unknown}}"

  if [[ "${TELEMETRY_TOOL_TYPE:-pve}" == "addon" ]]; then
    post_addon_to_api "$name" "$status" "$ec"
  else
    post_tool_to_api "$name" "$status" "$ec"
  fi
}

# ------------------------------------------------------------------------------
# init_tool_telemetry()
#
# One-line telemetry setup for tool/addon scripts: reads the persisted
# DIAGNOSTICS opt-in, starts the duration timer and installs an EXIT trap that
# reports success/failure automatically.
# $1: tool name (optional; falls back to $APP at exit time)
# $2: type - "pve" (host tools, default) or "addon" (container addons)
# ------------------------------------------------------------------------------
init_tool_telemetry() {
  local tool_name="${1:-}"
  local tool_type="${2:-pve}"

  [[ -n "$tool_name" ]] && TELEMETRY_TOOL_NAME="$tool_name"
  TELEMETRY_TOOL_TYPE="$tool_type"

  # Read diagnostics opt-in/opt-out persisted on first build.
  if [[ -f /usr/local/community-scripts/diagnostics ]]; then
    DIAGNOSTICS=$(grep -i "^DIAGNOSTICS=" /usr/local/community-scripts/diagnostics 2>/dev/null | awk -F'=' '{print $2}') || true
  fi

  start_install_timer

  # Report automatically when the sourcing script ends.
  trap '_telemetry_report_exit "$?"' EXIT
}

# ------------------------------------------------------------------------------
# post_tool_to_api()
#
# Reports a PVE host tool run (not a container installation) to telemetry.
# $1: tool name, $2: status ("success"/"failed"), $3: exit code (optional).
# Fire-and-forget; never blocks or fails the script.
# ------------------------------------------------------------------------------
post_tool_to_api() {
  command -v curl &>/dev/null || return 0
  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0

  local tool_name="${1:-unknown}"
  local status="${2:-success}"
  local exit_code="${3:-0}"
  local error="" error_category=""
  local uuid duration

  # Each tool execution gets its own record ID.
  uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || uuidgen 2>/dev/null || echo "tool-$(date +%s)")
  duration=$(get_install_duration)

  # Map legacy "done" to "success".
  [[ "$status" == "done" ]] && status="success"

  if [[ "$status" == "failed" ]]; then
    [[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
    local error_text=""
    error_text=$(get_error_text)
    local full_error
    full_error=$(build_error_string "$exit_code" "$error_text")
    error=$(json_escape "$full_error")
    error_category=$(categorize_error "$exit_code")
  fi

  local pve_version=""
  if command -v pveversion &>/dev/null; then
    pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
  fi

  # NOTE(review): the original JSON heredoc was destroyed in extraction;
  # payload reconstructed from the fields computed above - verify upstream.
  local JSON_PAYLOAD
  JSON_PAYLOAD=$(cat <<EOF
{
  "random_id": "${uuid}",
  "type": "tool",
  "nsapp": "${tool_name}",
  "status": "${status}",
  "exit_code": ${exit_code},
  "error": "${error}",
  "error_category": "${error_category}",
  "install_duration": ${duration:-0},
  "pve_version": "${pve_version}"
}
EOF
  )

  # Fire-and-forget delivery.
  curl -fsS -m 5 -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" &>/dev/null || true
}

# ------------------------------------------------------------------------------
# post_addon_to_api()
#
# Reports an addon installed inside a container (e.g. filebrowser, netdata).
# $1: addon name, $2: status ("success"/"failed"), $3: exit code (optional).
# Fire-and-forget; never blocks or fails the script.
# ------------------------------------------------------------------------------
post_addon_to_api() {
  command -v curl &>/dev/null || return 0
  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0

  local addon_name="${1:-unknown}"
  local status="${2:-success}"
  local exit_code="${3:-0}"
  local error="" error_category=""
  local uuid duration

  # Each addon installation gets its own record ID.
  uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || uuidgen 2>/dev/null || echo "addon-$(date +%s)")
  duration=$(get_install_duration)

  # Map legacy "done" to "success".
  [[ "$status" == "done" ]] && status="success"

  if [[ "$status" == "failed" ]]; then
    [[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
    local error_text=""
    error_text=$(get_error_text)
    local full_error
    full_error=$(build_error_string "$exit_code" "$error_text")
    error=$(json_escape "$full_error")
    error_category=$(categorize_error "$exit_code")
  fi

  # Detect the container's OS from os-release.
  local os_type="" os_version=""
  if [[ -f /etc/os-release ]]; then
    os_type=$(grep "^ID=" /etc/os-release | cut -d= -f2 | tr -d '"')
    os_version=$(grep "^VERSION_ID=" /etc/os-release | cut -d= -f2 | tr -d '"')
  fi

  # NOTE(review): the original JSON heredoc was destroyed in extraction;
  # payload reconstructed from the fields computed above - verify upstream.
  local JSON_PAYLOAD
  JSON_PAYLOAD=$(cat <<EOF
{
  "random_id": "${uuid}",
  "type": "addon",
  "nsapp": "${addon_name}",
  "status": "${status}",
  "exit_code": ${exit_code},
  "error": "${error}",
  "error_category": "${error_category}",
  "install_duration": ${duration:-0},
  "os_type": "${os_type}",
  "os_version": "${os_version}"
}
EOF
  )

  # Fire-and-forget delivery.
  curl -fsS -m 5 -X POST "${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" &>/dev/null || true
}
"$exit_code" =~ ^[0-9]+$ ]] && exit_code=1 + local error_text="" + error_text=$(get_error_text) + local full_error + full_error=$(build_error_string "$exit_code" "$error_text") + error=$(json_escape "$full_error") + error_category=$(categorize_error "$exit_code") + fi + + # Detect OS info + local os_type="" os_version="" + if [[ -f /etc/os-release ]]; then + os_type=$(grep "^ID=" /etc/os-release | cut -d= -f2 | tr -d '"') + os_version=$(grep "^VERSION_ID=" /etc/os-release | cut -d= -f2 | tr -d '"') + fi + + local JSON_PAYLOAD + JSON_PAYLOAD=$( + cat </dev/null || true +} + +# ------------------------------------------------------------------------------ +# post_update_to_api_extended() +# +# - Extended version of post_update_to_api with duration, GPU, and error category +# - Same arguments as post_update_to_api: +# * $1: status ("done" or "failed") +# * $2: exit_code (numeric) +# - Automatically includes: +# * Install duration (if start_install_timer was called) +# * Error category (for failed status) +# * GPU info (if detect_gpu was called) +# ------------------------------------------------------------------------------ +post_update_to_api_extended() { + # Silent fail - telemetry should never break scripts + command -v curl &>/dev/null || return 0 + + # Prevent duplicate submissions + POST_UPDATE_DONE=${POST_UPDATE_DONE:-false} + [[ "$POST_UPDATE_DONE" == "true" ]] && return 0 + + [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0 + [[ -z "${RANDOM_UUID:-}" ]] && return 0 + + local status="${1:-failed}" + local raw_exit_code="${2:-1}" + local exit_code=0 error="" pb_status error_category="" + local duration gpu_vendor gpu_passthrough + + # Get duration + duration=$(get_install_duration) + + # Get GPU info (if detected) + gpu_vendor="${GPU_VENDOR:-}" + gpu_passthrough="${GPU_PASSTHROUGH:-}" + + # Map status to telemetry values + case "$status" in + done | success) + pb_status="success" + exit_code=0 + error="" + error_category="" + ;; + failed) + pb_status="failed" + 
;; + *) + pb_status="unknown" + ;; + esac + + # For failed/unknown status, resolve exit code and error description + if [[ "$pb_status" == "failed" ]] || [[ "$pb_status" == "unknown" ]]; then + if [[ "$raw_exit_code" =~ ^[0-9]+$ ]]; then + exit_code="$raw_exit_code" + else + exit_code=1 + fi + local error_text="" + error_text=$(get_error_text) + local full_error + full_error=$(build_error_string "$exit_code" "$error_text") + error=$(json_escape "$full_error") + error_category=$(categorize_error "$exit_code") + [[ -z "$error" ]] && error="Unknown error" + fi + + local JSON_PAYLOAD + JSON_PAYLOAD=$( + cat </dev/null) || http_code="000" + + if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then + POST_UPDATE_DONE=true + return 0 + fi + + # Retry with minimal payload + sleep 1 + http_code=$(curl -sS -w "%{http_code}" -m "${STATUS_TIMEOUT}" -X POST "${TELEMETRY_URL}" \ + -H "Content-Type: application/json" \ + -d "{\"random_id\":\"${RANDOM_UUID}\",\"execution_id\":\"${EXECUTION_ID:-${RANDOM_UUID}}\",\"type\":\"${TELEMETRY_TYPE:-lxc}\",\"nsapp\":\"${NSAPP:-unknown}\",\"status\":\"${pb_status}\",\"exit_code\":${exit_code},\"install_duration\":${duration:-0}}" \ + -o /dev/null 2>/dev/null) || http_code="000" + + if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then + POST_UPDATE_DONE=true + return 0 + fi + + # Do NOT set POST_UPDATE_DONE=true — let EXIT trap retry +} diff --git a/misc/build.func b/misc/build.func new file mode 100644 index 0000000..97eb927 --- /dev/null +++ b/misc/build.func @@ -0,0 +1,5780 @@ +#!/usr/bin/env bash +# Copyright (c) 2021-2026 community-scripts ORG +# Author: tteck (tteckster) | MickLesk | michelroegl-brunner +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/branch/main/LICENSE + +# ============================================================================== +# BUILD.FUNC - LXC CONTAINER BUILD & CONFIGURATION +# ============================================================================== +# +# This file provides the main build functions for 
# It handles:
#
#   - Variable initialization and defaults
#   - Container creation and resource allocation
#   - Storage selection and management
#   - Advanced configuration and customization
#   - User interaction menus and prompts
#
# Usage:
#   - Sourced automatically by CT creation scripts
#   - Requires core.func and error_handler.func to be loaded first
#
# ==============================================================================

# ==============================================================================
# SECTION 1: INITIALIZATION & CORE VARIABLES
# ==============================================================================

# ------------------------------------------------------------------------------
# variables()
#
# Initializes the core globals for container creation: normalizes the app name
# (NSAPP = lowercase, no spaces), derives the installer name (var_install),
# defines validation patterns, captures host/PVE/kernel info, generates the
# session identifiers used for telemetry and logging, and records the app
# script's own CPU/RAM/Disk defaults for later precedence decisions.
# ------------------------------------------------------------------------------
variables() {
  # Normalized app name: lowercase with spaces removed (e.g. "My App" -> "myapp").
  NSAPP="${APP,,}"
  NSAPP="${NSAPP// /}"
  var_install="${NSAPP}-install" # name of the matching install script
  INTEGER='^[0-9]+([.][0-9]+)?$' # numeric pattern (note: also matches decimals)
  PVEHOST_NAME=$(hostname)       # Proxmox host name as reported by hostname(1)
  DIAGNOSTICS="no"               # safe default: no telemetry until the user consents via diagnostics_check()
  METHOD="default"               # reported to the telemetry API

  # Session / telemetry identifiers.
  RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)"
  EXECUTION_ID="${RANDOM_UUID}"   # unique per run (unique-indexed in PocketBase)
  SESSION_ID="${RANDOM_UUID:0:8}" # short form used in log file names
  BUILD_LOG="/tmp/create-lxc-${SESSION_ID}.log" # host-side creation log
  # NOTE: the combined container log path needs CTID and is therefore built
  # later, in build_container() / ensure_log_on_host(), as
  # "/tmp/${NSAPP}-${CTID}-${SESSION_ID}.log".
  CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"

  # Parse dev_mode early so the log-mode switch below can react to it.
  parse_dev_mode

  # Persist build logs when the dev "logs" mode is active.
  if [[ "${DEV_MODE_LOGS:-false}" == "true" ]]; then
    mkdir -p /var/log/community-scripts
    BUILD_LOG="/var/log/community-scripts/create-lxc-${SESSION_ID}-$(date +%Y%m%d_%H%M%S).log"
  fi

  # Proxmox VE version (or N/A outside PVE) and running kernel.
  if command -v pveversion >/dev/null 2>&1; then
    PVEVERSION="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
  else
    PVEVERSION="N/A"
  fi
  KERNEL_VERSION=$(uname -r)

  # Remember the app script's own resource defaults (set before default.vars is
  # loaded). If the app declares higher values than default.vars, the app wins.
  local res src
  for res in CPU RAM DISK; do
    src="var_${res,,}"
    if [[ -n "${!src:-}" && "${!src}" =~ ^[0-9]+$ ]]; then
      export "APP_DEFAULT_${res}=${!src}"
    fi
  done
}

# api.func is needed unconditionally (telemetry hooks are used throughout).
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func)

# Pull in core utilities and the error handler, preferring curl over wget.
if command -v curl >/dev/null 2>&1; then
  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func)
  load_functions
  catch_errors
elif command -v wget >/dev/null 2>&1; then
  source <(wget -qO- https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
  source <(wget -qO- https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func)
  load_functions
  catch_errors
fi
# ==============================================================================
# SECTION 2: PRE-FLIGHT CHECKS & SYSTEM VALIDATION
# ==============================================================================

# ------------------------------------------------------------------------------
# maxkeys_check()
#
# Guards against exhausting the kernel keyring limits for unprivileged
# containers (key usage is accounted to UID 100000). Reads maxkeys/maxbytes,
# compares current usage against the limits (minus a safety margin) and aborts
# with sysctl tuning advice when usage is too close.
# Exit codes: 107 (cannot read kernel params), 108 (limits nearly exceeded).
# Refs: /proc/key-users | https://docs.kernel.org/security/keys/core.html
# ------------------------------------------------------------------------------
maxkeys_check() {
  # Current kernel limits (0 when unreadable).
  per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
  per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)

  if ((per_user_maxkeys == 0 || per_user_maxbytes == 0)); then
    msg_error "Unable to read kernel key parameters. Ensure proper permissions."
    exit 107
  fi

  # Current usage for UID 100000 (the typical container mapping user).
  used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
  used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)

  # Safety margins and the doubled limits we suggest on failure.
  threshold_keys=$((per_user_maxkeys - 100))
  threshold_bytes=$((per_user_maxbytes - 1000))
  new_limit_keys=$((per_user_maxkeys * 2))
  new_limit_bytes=$((per_user_maxbytes * 2))

  failure=0
  if ((used_lxc_keys > threshold_keys)); then
    msg_warn "Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys})"
    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
    failure=1
  fi
  if ((used_lxc_bytes > threshold_bytes)); then
    msg_warn "Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes})"
    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
    failure=1
  fi

  if ((failure == 1)); then
    msg_error "Kernel key limits exceeded - see suggestions above"
    exit 108
  fi
  # Silent success - only show errors if they exist.
}

# ==============================================================================
# SECTION 3: CONTAINER SETUP UTILITIES
# ==============================================================================

# ------------------------------------------------------------------------------
# get_current_ip()
#
# Prints the container's current IP depending on OS type: Debian/Ubuntu use
# `hostname -I`, Alpine parses eth0 via iproute2. IPv4 is preferred with an
# IPv6 fallback; prints "Unknown" when the OS cannot be determined and an
# empty string when /etc/os-release is missing. Always returns 0.
# ------------------------------------------------------------------------------
get_current_ip() {
  CURRENT_IP=""
  if [ -f /etc/os-release ]; then
    if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
      # Debian/Ubuntu ship hostname -I; prefer IPv4, fall back to IPv6.
      CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' | head -n1)
      [[ -z "$CURRENT_IP" ]] && CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1)
    elif grep -q 'ID=alpine' /etc/os-release; then
      # Alpine: parse eth0 via the ip command; IPv4 first, then global IPv6.
      CURRENT_IP=$(ip -4 addr show eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
      [[ -z "$CURRENT_IP" ]] && CURRENT_IP=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n 1)
    else
      CURRENT_IP="Unknown"
    fi
  fi
  echo "$CURRENT_IP"
}
# ------------------------------------------------------------------------------
# update_motd_ip()
#
# Refreshes /etc/motd with the container's current IP (removing stale entries
# first) and updates the dynamic OS/hostname/IP values inside the
# community-scripts profile snippet when they changed (e.g. after an upgrade).
# ------------------------------------------------------------------------------
update_motd_ip() {
  MOTD_FILE="/etc/motd"
  PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"

  if [ -f "$MOTD_FILE" ]; then
    # Drop stale entries first so the file never accumulates duplicates.
    sed -i '/IP Address:/d' "$MOTD_FILE"
    IP=$(get_current_ip)
    echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
  fi

  # Refresh the dynamic LXC details profile, but only if it is ours.
  if [ -f "$PROFILE_FILE" ] && grep -q "community-scripts" "$PROFILE_FILE" 2>/dev/null; then
    local current_os current_hostname current_ip
    current_os="$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"') - Version: $(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')"
    current_hostname="$(hostname)"
    current_ip="$(hostname -I | awk '{print $1}')"

    # Rewrite each field only when its value actually changed.
    if ! grep -q "OS:.*$current_os" "$PROFILE_FILE" 2>/dev/null; then
      sed -i "s|OS:.*|OS: \${GN}$current_os\${CL}\\\"|" "$PROFILE_FILE"
    fi
    if ! grep -q "Hostname:.*$current_hostname" "$PROFILE_FILE" 2>/dev/null; then
      sed -i "s|Hostname:.*|Hostname: \${GN}$current_hostname\${CL}\\\"|" "$PROFILE_FILE"
    fi
    if ! grep -q "IP Address:.*$current_ip" "$PROFILE_FILE" 2>/dev/null; then
      sed -i "s|IP Address:.*|IP Address: \${GN}$current_ip\${CL}\\\"|" "$PROFILE_FILE"
    fi
  fi
}

# ------------------------------------------------------------------------------
# install_ssh_keys_into_ct()
#
# Installs SSH public keys into the container's root account when SSH=yes.
# Sources: SSH_KEYS_FILE (advanced settings) or SSH_AUTHORIZED_KEY (user
# defaults, materialised into a temp file). Pushes via `pct push` with a
# streaming `pct exec` fallback. Warns and returns 0 when no keys exist.
# ------------------------------------------------------------------------------
install_ssh_keys_into_ct() {
  # Nothing to do unless the user enabled SSH key installation.
  [[ "${SSH:-no}" != "yes" ]] && return 0

  # SSH_KEYS_FILE may be unset when advanced_settings was skipped.
  : "${SSH_KEYS_FILE:=}"

  # A single key from user defaults is written to a temp file so both sources
  # share the same push path below.
  if [[ -z "$SSH_KEYS_FILE" || ! -s "$SSH_KEYS_FILE" ]] && [[ -n "${SSH_AUTHORIZED_KEY:-}" ]]; then
    SSH_KEYS_FILE="$(mktemp)"
    printf '%s\n' "$SSH_AUTHORIZED_KEY" >"$SSH_KEYS_FILE"
  fi

  if [[ -z "$SSH_KEYS_FILE" || ! -s "$SSH_KEYS_FILE" ]]; then
    msg_warn "No SSH keys to install (skipping)."
    return 0
  fi

  msg_info "Installing selected SSH keys into CT ${CTID}"
  pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
    msg_error "prepare /root/.ssh failed"
    return 1
  }
  # Prefer pct push; fall back to streaming the file through pct exec.
  pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
    pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
    msg_error "write authorized_keys failed"
    return 1
  }
  pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
  msg_ok "Installed SSH keys into CT ${CTID}"
  return 0
}

# ------------------------------------------------------------------------------
# validate_container_id()
#
# Checks cluster-wide whether a container/VM ID is free.
# Order: pvesh cluster query (all nodes), local config files, per-node config
# directories (covers pmxcfs sync delays), leftover LVM volumes.
# Returns 0 when the ID is available, 1 when taken or non-numeric.
# ------------------------------------------------------------------------------
validate_container_id() {
  local ctid="$1"

  # IDs must be purely numeric.
  [[ "$ctid" =~ ^[0-9]+$ ]] || return 1

  # Cluster-wide check: every VM/CT on every node. Works on single-node setups
  # too; degrades gracefully when pvesh is unavailable or returns nothing.
  if command -v pvesh &>/dev/null; then
    local known_ids
    known_ids=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null |
      grep -oP '"vmid":\s*\K[0-9]+' 2>/dev/null || true)
    if [[ -n "$known_ids" ]] && grep -qw "$ctid" <<<"$known_ids"; then
      return 1
    fi
  fi

  # Local fallback: an existing config file means the ID is taken.
  if [[ -f "/etc/pve/qemu-server/${ctid}.conf" || -f "/etc/pve/lxc/${ctid}.conf" ]]; then
    return 1
  fi

  # Scan every node directory (on single-node this is just the one node).
  local node_dir
  if [[ -d /etc/pve/nodes ]]; then
    for node_dir in /etc/pve/nodes/*/; do
      if [[ -f "${node_dir}qemu-server/${ctid}.conf" || -f "${node_dir}lxc/${ctid}.conf" ]]; then
        return 1
      fi
    done
  fi

  # Leftover LVM logical volumes named after the ID also block reuse.
  if lvs --noheadings -o lv_name 2>/dev/null | grep -qE "(^|[-_])${ctid}($|[-_])"; then
    return 1
  fi

  return 0
}
------------------------------------------------------------------------------ +# get_valid_container_id() +# +# - Returns a valid, unused container ID (CLUSTER-AWARE) +# - Uses pvesh /cluster/nextid as starting point (already cluster-aware) +# - If provided ID is valid, returns it +# - Otherwise increments until a free one is found across entire cluster +# - Calls validate_container_id() to check availability +# ------------------------------------------------------------------------------ +get_valid_container_id() { + local suggested_id="${1:-$(pvesh get /cluster/nextid 2>/dev/null || echo 100)}" + + # Ensure we have a valid starting ID + if ! [[ "$suggested_id" =~ ^[0-9]+$ ]]; then + suggested_id=$(pvesh get /cluster/nextid 2>/dev/null || echo 100) + fi + + local max_attempts=1000 + local attempts=0 + + while ! validate_container_id "$suggested_id"; do + suggested_id=$((suggested_id + 1)) + attempts=$((attempts + 1)) + if [[ $attempts -ge $max_attempts ]]; then + msg_error "Could not find available container ID after $max_attempts attempts" + exit 109 + fi + done + + echo "$suggested_id" +} + +# ------------------------------------------------------------------------------ +# validate_hostname() +# +# - Validates hostname/FQDN according to RFC 1123/952 +# - Checks total length (max 253 characters for FQDN) +# - Validates each label (max 63 chars, alphanumeric + hyphens) +# - Returns 0 if valid, 1 if invalid +# ------------------------------------------------------------------------------ +validate_hostname() { + local hostname="$1" + + # Check total length (max 253 for FQDN) + if [[ ${#hostname} -gt 253 ]] || [[ -z "$hostname" ]]; then + return 1 + fi + + # Split by dots and validate each label + local IFS='.' + read -ra labels <<<"$hostname" + for label in "${labels[@]}"; do + # Each label: 1-63 chars, alphanumeric, hyphens allowed (not at start/end) + if [[ -z "$label" ]] || [[ ${#label} -gt 63 ]]; then + return 1 + fi + if [[ ! 
# ------------------------------------------------------------------------------
# validate_mac_address()
#
# - Validates MAC address format (XX:XX:XX:XX:XX:XX)
# - Empty value is allowed (auto-generated)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_mac_address() {
  local mac="$1"
  [[ -z "$mac" ]] && return 0
  if [[ ! "$mac" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then
    return 1
  fi
  return 0
}

# ------------------------------------------------------------------------------
# validate_vlan_tag()
#
# - Validates VLAN tag (1-4094)
# - Empty value is allowed (no VLAN)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_vlan_tag() {
  local vlan="$1"
  [[ -z "$vlan" ]] && return 0
  if ! [[ "$vlan" =~ ^[0-9]+$ ]] || ((vlan < 1 || vlan > 4094)); then
    return 1
  fi
  return 0
}

# ------------------------------------------------------------------------------
# validate_mtu()
#
# - Validates MTU size (576-65535, common values: 1500, 9000)
# - Empty value is allowed (default 1500)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_mtu() {
  local mtu="$1"
  [[ -z "$mtu" ]] && return 0
  if ! [[ "$mtu" =~ ^[0-9]+$ ]] || ((mtu < 576 || mtu > 65535)); then
    return 1
  fi
  return 0
}

# ------------------------------------------------------------------------------
# validate_ipv6_address()
#
# - Validates IPv6 address with optional CIDR notation
# - Supports compressed (::) and full notation; empty value is allowed
# - FIX: the old check accepted ":::" and any group count (e.g. "1:2:3" or
#   nine full groups). Now: without "::" exactly 8 groups are required, with
#   "::" (which compresses at least one group) at most 7 explicit groups may
#   remain, and ":::" is rejected outright.
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_ipv6_address() {
  local ipv6="$1"
  [[ -z "$ipv6" ]] && return 0

  # Extract address and CIDR
  local addr="${ipv6%%/*}"
  local cidr="${ipv6##*/}"

  # Validate CIDR if present (1-128)
  if [[ "$ipv6" == */* ]]; then
    if ! [[ "$cidr" =~ ^[0-9]+$ ]] || ((cidr < 1 || cidr > 128)); then
      return 1
    fi
  fi

  # Must contain only hex digits and colons
  if [[ ! "$addr" =~ ^[0-9a-fA-F:]+$ ]]; then
    return 1
  fi

  # Must contain at least one colon
  if [[ ! "$addr" == *:* ]]; then
    return 1
  fi

  # Reject three consecutive colons and a second "::" (only one allowed)
  if [[ "$addr" == *:::* ]] || [[ "$addr" == *::*::* ]]; then
    return 1
  fi

  # Split into groups; count non-empty groups and check their width
  local IFS=':'
  local -a segments
  read -ra segments <<<"$addr"
  local seg nonempty=0
  for seg in "${segments[@]}"; do
    if [[ -n "$seg" ]]; then
      nonempty=$((nonempty + 1))
      # No group may exceed 4 hex digits
      if [[ ${#seg} -gt 4 ]]; then
        return 1
      fi
    fi
  done

  # Group-count rules (see FIX note in the header)
  if [[ "$addr" == *::* ]]; then
    ((nonempty <= 7)) || return 1
  else
    ((nonempty == 8)) || return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# validate_bridge()
#
# - Validates that the network bridge interface exists on the host
# - Returns 0 if valid, 1 if invalid (empty name is invalid)
# ------------------------------------------------------------------------------
validate_bridge() {
  local bridge="$1"
  [[ -z "$bridge" ]] && return 1

  # Check if the bridge interface exists
  if ! ip link show "$bridge" &>/dev/null; then
    return 1
  fi

  return 0
}
# ------------------------------------------------------------------------------
# validate_gateway_in_subnet()
#
# - Validates that gateway IP is in the same subnet as static IP
# - Arguments: static_ip (with CIDR), gateway_ip
# - FIX: guard the CIDR before doing arithmetic. Previously an address
#   without "/prefix" made ${static_ip##*/} the whole dotted quad and the
#   shift expression below aborted with an arithmetic syntax error.
#   An undeterminable subnet now yields "no objection" (0) - the dedicated
#   validate_ip_address() is responsible for rejecting malformed addresses.
# - Returns 0 if valid (or not determinable), 1 if gateway is outside subnet
# ------------------------------------------------------------------------------
validate_gateway_in_subnet() {
  local static_ip="$1"
  local gateway="$2"

  # Nothing to cross-check when either side is empty
  [[ -z "$static_ip" || -z "$gateway" ]] && return 0

  # Extract IP and CIDR
  local ip="${static_ip%%/*}"
  local cidr="${static_ip##*/}"

  # Subnet not determinable -> do not block the caller (see FIX note)
  if [[ "$static_ip" != */* ]] || ! [[ "$cidr" =~ ^[0-9]+$ ]] || ((cidr > 32)); then
    return 0
  fi

  # Build the netmask from the prefix length
  local mask=$((0xFFFFFFFF << (32 - cidr) & 0xFFFFFFFF))

  # Convert both dotted quads to 32-bit integers
  local IFS='.'
  read -r i1 i2 i3 i4 <<<"$ip"
  read -r g1 g2 g3 g4 <<<"$gateway"

  local ip_int=$(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4))
  local gw_int=$(((g1 << 24) + (g2 << 16) + (g3 << 8) + g4))

  # Same network <=> identical masked prefixes
  if (((ip_int & mask) != (gw_int & mask))); then
    return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# validate_ip_address()
#
# - Validates IPv4 address WITH CIDR notation (e.g. 10.0.0.5/24)
# - Checks each octet is 0-255 and CIDR is 1-32
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_ip_address() {
  local ip="$1"
  [[ -z "$ip" ]] && return 1

  # Check format with CIDR; octets/prefix captured in BASH_REMATCH
  if [[ ! "$ip" =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$ ]]; then
    return 1
  fi

  local o1="${BASH_REMATCH[1]}"
  local o2="${BASH_REMATCH[2]}"
  local o3="${BASH_REMATCH[3]}"
  local o4="${BASH_REMATCH[4]}"
  local cidr="${BASH_REMATCH[5]}"

  # Validate octets (0-255)
  local octet
  for octet in "$o1" "$o2" "$o3" "$o4"; do
    if ((octet > 255)); then
      return 1
    fi
  done

  # Validate CIDR (1-32)
  if ((cidr < 1 || cidr > 32)); then
    return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# validate_gateway_ip()
#
# - Validates gateway IPv4 address (WITHOUT CIDR); empty value is allowed
# - Checks each octet is 0-255
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_gateway_ip() {
  local ip="$1"
  [[ -z "$ip" ]] && return 0

  # Check format without CIDR
  if [[ ! "$ip" =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$ ]]; then
    return 1
  fi

  # Validate octets (0-255)
  local octet
  for octet in "${BASH_REMATCH[@]:1:4}"; do
    if ((octet > 255)); then
      return 1
    fi
  done

  return 0
}

# ------------------------------------------------------------------------------
# validate_timezone()
#
# - Validates timezone string against system zoneinfo database
# - Empty value or "host" is allowed (container inherits host timezone)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_timezone() {
  local tz="$1"
  [[ -z "$tz" || "$tz" == "host" ]] && return 0

  # Valid iff a matching zoneinfo file exists
  if [[ ! -f "/usr/share/zoneinfo/$tz" ]]; then
    return 1
  fi

  return 0
}
# ------------------------------------------------------------------------------
# validate_tags()
#
# - Validates Proxmox tags format
# - Only alphanumeric, hyphens, underscores, and semicolons allowed
# - Empty value is allowed
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_tags() {
  local tags="$1"
  [[ -z "$tags" ]] && return 0

  # Tags can only contain alphanumeric, -, _, and ; (separator)
  if [[ ! "$tags" =~ ^[a-zA-Z0-9_\;-]+$ ]]; then
    return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# find_host_ssh_keys()
#
# - Scans the host for files containing SSH public keys
# - Honors var_ssh_import_glob when set, else defaults
#   (~/.ssh, /etc/ssh/authorized_keys*)
# - Prints a colon-separated list of matching files on stdout
# - Sets FOUND_HOST_KEY_COUNT to the number of keys found
# - FIX: the fallback used "$(grep -c ... || echo 0)"; grep -c prints "0" AND
#   exits non-zero on no match, so the substitution could expand to "0<nl>0"
#   and break the arithmetic. Capture the count and the status separately.
# ------------------------------------------------------------------------------
find_host_ssh_keys() {
  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
  local -a files=() cand=()
  local g="${var_ssh_import_glob:-}"
  local total=0 f base c

  shopt -s nullglob
  if [[ -n "$g" ]]; then
    # shellcheck disable=SC2206 # intentional word-splitting + globbing of the user glob
    for pat in $g; do cand+=($pat); done
  else
    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
    cand+=(/root/.ssh/*.pub)
    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
  fi
  shopt -u nullglob

  for f in "${cand[@]}"; do
    [[ -f "$f" && -r "$f" ]] || continue
    base="$(basename -- "$f")"
    case "$base" in
    known_hosts | known_hosts.* | config) continue ;;
    id_*) [[ "$f" != *.pub ]] && continue ;;
    esac

    # CRLF safe: strip \r, drop comments/blank lines, count key lines
    c=$(tr -d '\r' <"$f" | awk '
      /^[[:space:]]*#/ {next}
      /^[[:space:]]*$/ {next}
      {print}
    ' | grep -E -c "$re" || true)

    if ((c > 0)); then
      files+=("$f")
      total=$((total + c))
    fi
  done

  # Fallback to /root/.ssh/authorized_keys (see FIX note in the header)
  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
    local fc
    fc=$(grep -E -c "$re" /root/.ssh/authorized_keys) || fc=0
    if ((fc > 0)); then
      files+=(/root/.ssh/authorized_keys)
      total=$((total + fc))
    fi
  fi

  FOUND_HOST_KEY_COUNT="$total"
  # Print colon-joined list in a subshell so IFS is not leaked
  (
    IFS=:
    echo "${files[*]}"
  )
}

# ==============================================================================
# SECTION 3B: IP RANGE SCANNING
# ==============================================================================

# ------------------------------------------------------------------------------
# ip_to_int() / int_to_ip()
#
# - Converts a dotted-quad IPv4 address to a 32-bit integer and back,
#   used for iterating IP ranges
# ------------------------------------------------------------------------------
ip_to_int() {
  local IFS=.
  read -r i1 i2 i3 i4 <<<"$1"
  echo $(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4))
}

int_to_ip() {
  local ip=$1
  echo "$(((ip >> 24) & 0xFF)).$(((ip >> 16) & 0xFF)).$(((ip >> 8) & 0xFF)).$((ip & 0xFF))"
}

# ------------------------------------------------------------------------------
# resolve_ip_from_range()
#
# - Takes an IP range in format "10.0.0.1/24-10.0.0.10/24"
# - Pings each IP in the range to find the first one that does NOT answer
# - Sets NET_RESOLVED to "<free-ip>/<cidr>" on success, empty on failure
# - Returns 0 on success, 1 on parse failure or exhausted range
# ------------------------------------------------------------------------------
resolve_ip_from_range() {
  local range="$1"
  local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$'
  local ip_start ip_end

  # Parse range: "10.0.0.1/24-10.0.0.10/24"
  ip_start="${range%%-*}"
  ip_end="${range##*-}"

  if [[ ! "$ip_start" =~ $ip_cidr_regex ]] || [[ ! "$ip_end" =~ $ip_cidr_regex ]]; then
    NET_RESOLVED=""
    return 1
  fi

  local ip1="${ip_start%%/*}"
  local ip2="${ip_end%%/*}"
  # CIDR of the start address is used for the resolved result
  local cidr="${ip_start##*/}"

  local start_int end_int
  start_int=$(ip_to_int "$ip1")
  end_int=$(ip_to_int "$ip2")

  local ip_int ip
  for ((ip_int = start_int; ip_int <= end_int; ip_int++)); do
    ip=$(int_to_ip "$ip_int")
    msg_info "Checking IP: $ip"
    # No ping reply within 1s -> treat as free
    if ! ping -c 1 -W 1 "$ip" >/dev/null 2>&1; then
      NET_RESOLVED="$ip/$cidr"
      msg_ok "Found free IP: ${BGN}$NET_RESOLVED${CL}"
      return 0
    fi
  done

  NET_RESOLVED=""
  msg_error "No free IP found in range $range"
  return 1
}

# ------------------------------------------------------------------------------
# is_ip_range()
#
# - Checks if a string is an IP range (two IP/CIDR values joined by "-")
# - Returns 0 if it's a range, 1 otherwise
# ------------------------------------------------------------------------------
is_ip_range() {
  local value="$1"
  local ip_start ip_end
  if [[ "$value" == *-* ]] && [[ "$value" != "dhcp" ]]; then
    local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$'
    ip_start="${value%%-*}"
    ip_end="${value##*-}"
    if [[ "$ip_start" =~ $ip_cidr_regex ]] && [[ "$ip_end" =~ $ip_cidr_regex ]]; then
      return 0
    fi
  fi
  return 1
}

# ==============================================================================
# SECTION 4: STORAGE & RESOURCE MANAGEMENT
# ==============================================================================

# ------------------------------------------------------------------------------
# _write_storage_to_vars()
#
# - Writes a storage selection to a vars file
# - Removes old entries (commented and uncommented) to avoid duplicates
# - Arguments: $1 = vars_file, $2 = key (var_container_storage /
#   var_template_storage), $3 = value
# ------------------------------------------------------------------------------
_write_storage_to_vars() {
  local vf="$1" key="$2" val="$3"
  # remove uncommented and commented versions to avoid duplicates
  sed -i "/^[#[:space:]]*${key}=/d" "$vf"
  echo "${key}=${val}" >>"$vf"
}
= value + local vf="$1" key="$2" val="$3" + # remove uncommented and commented versions to avoid duplicates + sed -i "/^[#[:space:]]*${key}=/d" "$vf" + echo "${key}=${val}" >>"$vf" +} + +choose_and_set_storage_for_file() { + # $1 = vars_file, $2 = class ('container'|'template') + local vf="$1" class="$2" key="" current="" + case "$class" in + container) key="var_container_storage" ;; + template) key="var_template_storage" ;; + *) + msg_error "Unknown storage class: $class" + return 1 + ;; + esac + + current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf") + + # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4). + local content="rootdir" + [[ "$class" == "template" ]] && content="vztmpl" + local count + count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l) + + if [[ "$count" -eq 1 ]]; then + STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}') + STORAGE_INFO="" + + # Validate storage space for auto-picked container storage + if [[ "$class" == "container" && -n "${DISK_SIZE:-}" ]]; then + validate_storage_space "$STORAGE_RESULT" "$DISK_SIZE" "yes" + # Continue even if validation fails - user was warned + fi + else + # If the current value is preselectable, we could show it, but per your requirement we always offer selection + select_storage "$class" || return 1 + fi + + _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT" + + # Keep environment in sync for later steps (e.g. 
app-default save) + if [[ "$class" == "container" ]]; then + export var_container_storage="$STORAGE_RESULT" + export CONTAINER_STORAGE="$STORAGE_RESULT" + else + export var_template_storage="$STORAGE_RESULT" + export TEMPLATE_STORAGE="$STORAGE_RESULT" + fi + + # Silent operation - no output message +} + +# ============================================================================== +# SECTION 5: CONFIGURATION & DEFAULTS MANAGEMENT +# ============================================================================== + +# ------------------------------------------------------------------------------ +# base_settings() +# +# - Defines all base/default variables for container creation +# - Reads from environment variables (var_*) +# - Provides fallback defaults for OS type/version +# - App-specific values take precedence when they are HIGHER (for CPU, RAM, DISK) +# - Sets up container type, resources, network, SSH, features, and tags +# ------------------------------------------------------------------------------ +base_settings() { + # Default Settings + CT_TYPE=${var_unprivileged:-"1"} + + # Resource allocation: App defaults take precedence if HIGHER + # Compare app-declared values (saved in APP_DEFAULT_*) with current var_* values + local final_disk="${var_disk:-4}" + local final_cpu="${var_cpu:-1}" + local final_ram="${var_ram:-1024}" + + # If app declared higher values, use those instead + if [[ -n "${APP_DEFAULT_DISK:-}" && "${APP_DEFAULT_DISK}" =~ ^[0-9]+$ ]]; then + if [[ "${APP_DEFAULT_DISK}" -gt "${final_disk}" ]]; then + final_disk="${APP_DEFAULT_DISK}" + fi + fi + + if [[ -n "${APP_DEFAULT_CPU:-}" && "${APP_DEFAULT_CPU}" =~ ^[0-9]+$ ]]; then + if [[ "${APP_DEFAULT_CPU}" -gt "${final_cpu}" ]]; then + final_cpu="${APP_DEFAULT_CPU}" + fi + fi + + if [[ -n "${APP_DEFAULT_RAM:-}" && "${APP_DEFAULT_RAM}" =~ ^[0-9]+$ ]]; then + if [[ "${APP_DEFAULT_RAM}" -gt "${final_ram}" ]]; then + final_ram="${APP_DEFAULT_RAM}" + fi + fi + + DISK_SIZE="${final_disk}" + 
CORE_COUNT="${final_cpu}" + RAM_SIZE="${final_ram}" + VERBOSE=${var_verbose:-"${1:-no}"} + PW="" + if [[ -n "${var_pw:-}" ]]; then + local _pw_raw="${var_pw}" + case "$_pw_raw" in + --password\ *) _pw_raw="${_pw_raw#--password }" ;; + -password\ *) _pw_raw="${_pw_raw#-password }" ;; + esac + while [[ "$_pw_raw" == -* ]]; do + _pw_raw="${_pw_raw#-}" + done + if [[ -z "$_pw_raw" ]]; then + msg_warn "Password was only dashes after cleanup; leaving empty." + else + PW="--password $_pw_raw" + fi + fi + + # Validate and set Container ID + local requested_id="${var_ctid:-$NEXTID}" + if ! validate_container_id "$requested_id"; then + # Only show warning if user manually specified an ID (not auto-assigned) + if [[ -n "${var_ctid:-}" ]]; then + msg_warn "Container ID $requested_id is already in use. Using next available ID: $(get_valid_container_id "$requested_id")" + fi + requested_id=$(get_valid_container_id "$requested_id") + fi + CT_ID="$requested_id" + + # Validate and set Hostname/FQDN + local requested_hostname="${var_hostname:-$NSAPP}" + requested_hostname=$(echo "${requested_hostname,,}" | tr -d ' ') + if ! validate_hostname "$requested_hostname"; then + if [[ -n "${var_hostname:-}" ]]; then + msg_warn "Invalid hostname '$requested_hostname'. Using default: $NSAPP" + fi + requested_hostname="$NSAPP" + fi + HN="$requested_hostname" + + BRG=${var_brg:-"vmbr0"} + NET=${var_net:-"dhcp"} + + # Resolve IP range if NET contains a range (e.g., 192.168.1.100/24-192.168.1.200/24) + if is_ip_range "$NET"; then + msg_info "Scanning IP range: $NET" + if resolve_ip_from_range "$NET"; then + NET="$NET_RESOLVED" + else + msg_error "Could not find free IP in range. Falling back to DHCP." 
+ NET="dhcp" + fi + fi + + IPV6_METHOD=${var_ipv6_method:-"none"} + IPV6_STATIC=${var_ipv6_static:-""} + GATE=${var_gateway:-""} + APT_CACHER=${var_apt_cacher:-""} + APT_CACHER_IP=${var_apt_cacher_ip:-""} + + # Runtime check: Verify APT cacher is reachable if configured + if [[ -n "$APT_CACHER_IP" && "$APT_CACHER" == "yes" ]]; then + if ! curl -s --connect-timeout 2 "http://${APT_CACHER_IP}:3142" >/dev/null 2>&1; then + msg_warn "APT Cacher configured but not reachable at ${APT_CACHER_IP}:3142" + msg_custom "⚠️" "${YW}" "Disabling APT Cacher for this installation" + APT_CACHER="" + APT_CACHER_IP="" + else + msg_ok "APT Cacher verified at ${APT_CACHER_IP}:3142" + fi + fi + + MTU=${var_mtu:-""} + SD=${var_searchdomain:-""} + NS=${var_ns:-""} + MAC=${var_mac:-""} + VLAN=${var_vlan:-""} + SSH=${var_ssh:-"no"} + SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""} + UDHCPC_FIX=${var_udhcpc_fix:-""} + TAGS="community-script,${var_tags:-}" + ENABLE_FUSE=${var_fuse:-"${1:-no}"} + ENABLE_TUN=${var_tun:-"${1:-no}"} + + # Additional settings that may be skipped if advanced_settings is not run (e.g., App Defaults) + ENABLE_GPU=${var_gpu:-"no"} + ENABLE_NESTING=${var_nesting:-"1"} + ENABLE_KEYCTL=${var_keyctl:-"0"} + ENABLE_MKNOD=${var_mknod:-"0"} + PROTECT_CT=${var_protection:-"no"} + CT_TIMEZONE=${var_timezone:-"$timezone"} + [[ "${CT_TIMEZONE:-}" == Etc/* ]] && CT_TIMEZONE="host" # pct doesn't accept Etc/* zones + + # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. 
OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts + if [ -z "$var_os" ]; then + var_os="debian" + fi + if [ -z "$var_version" ]; then + var_version="12" + fi +} + +# ------------------------------------------------------------------------------ +# load_vars_file() +# +# - Safe parser for KEY=VALUE lines from vars files +# - Used by default_var_settings and app defaults loading +# - Only loads whitelisted var_* keys +# - Optional force parameter to override existing values (for app defaults) +# ------------------------------------------------------------------------------ +load_vars_file() { + local file="$1" + local force="${2:-no}" # If "yes", override existing variables + [ -f "$file" ] || return 0 + msg_info "Loading defaults from ${file}" + + # Allowed var_* keys + local VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl + var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu + var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged + var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain + ) + + # Whitelist check helper + _is_whitelisted() { + local k="$1" w + for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done + return 1 + } + + local line key val + while IFS= read -r line || [ -n "$line" ]; do + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" || "$line" == \#* ]] && continue + if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then + local var_key="${BASH_REMATCH[1]}" + local var_val="${BASH_REMATCH[2]}" + + [[ "$var_key" != var_* ]] && continue + _is_whitelisted "$var_key" || continue + + # Strip inline comments (anything after unquoted #) + # Only strip if not inside quotes + if [[ ! 
"$var_val" =~ ^[\"\'] ]]; then + var_val="${var_val%%#*}" + fi + + # Strip quotes + if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then + var_val="${BASH_REMATCH[1]}" + elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then + var_val="${BASH_REMATCH[1]}" + fi + + # Trim trailing whitespace + var_val="${var_val%"${var_val##*[![:space:]]}"}" + + # Validate values before setting (skip empty values - they use defaults) + if [[ -n "$var_val" ]]; then + case "$var_key" in + var_mac) + if ! validate_mac_address "$var_val"; then + msg_warn "Invalid MAC address '$var_val' in $file, ignoring" + continue + fi + ;; + var_vlan) + if ! validate_vlan_tag "$var_val"; then + msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring" + continue + fi + ;; + var_mtu) + if ! validate_mtu "$var_val"; then + msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring" + continue + fi + ;; + var_tags) + if ! validate_tags "$var_val"; then + msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring" + continue + fi + ;; + var_timezone) + if ! validate_timezone "$var_val"; then + msg_warn "Invalid timezone '$var_val' in $file, ignoring" + continue + fi + ;; + var_brg) + if ! validate_bridge "$var_val"; then + msg_warn "Bridge '$var_val' not found in $file, ignoring" + continue + fi + ;; + var_gateway) + if ! validate_gateway_ip "$var_val"; then + msg_warn "Invalid gateway IP '$var_val' in $file, ignoring" + continue + fi + ;; + var_hostname) + if ! validate_hostname "$var_val"; then + msg_warn "Invalid hostname '$var_val' in $file, ignoring" + continue + fi + ;; + var_cpu) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then + msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring" + continue + fi + ;; + var_ram) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then + msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring" + continue + fi + ;; + var_disk) + if ! 
[[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then + msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring" + continue + fi + ;; + var_unprivileged) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_nesting) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + # Warn about potential issues with systemd-based OS when nesting is disabled via vars file + if [[ "$var_val" == "0" && "${var_os:-debian}" != "alpine" ]]; then + msg_warn "Nesting disabled in $file - modern systemd-based distributions may require nesting for proper operation" + fi + ;; + var_keyctl) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_net) + # var_net can be: dhcp, static IP/CIDR, or IP range + if [[ "$var_val" != "dhcp" ]]; then + if is_ip_range "$var_val"; then + : # IP range is valid, will be resolved at runtime + elif ! 
validate_ip_address "$var_val"; then + msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring" + continue + fi + fi + ;; + var_fuse | var_tun | var_gpu | var_ssh | var_verbose | var_protection) + if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then + msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring" + continue + fi + ;; + var_ipv6_method) + if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then + msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring" + continue + fi + ;; + var_container_storage | var_template_storage) + # Validate that the storage exists and is active on the current node + local _storage_status + _storage_status=$(pvesm status 2>/dev/null | awk -v s="$var_val" '$1 == s { print $3 }') + if [[ -z "$_storage_status" ]]; then + msg_warn "Storage '$var_val' from $file not found on this node, ignoring" + continue + elif [[ "$_storage_status" == "disabled" ]]; then + msg_warn "Storage '$var_val' from $file is disabled on this node, ignoring" + continue + fi + ;; + esac + fi + + # Set variable: force mode overrides existing, otherwise only set if empty + if [[ "$force" == "yes" ]]; then + export "${var_key}=${var_val}" + else + [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}" + fi + fi + done <"$file" + msg_ok "Loaded ${file}" +} + +# ------------------------------------------------------------------------------ +# default_var_settings +# +# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing) +# - Loads var_* values from default.vars (safe parser, no source/eval) +# - Precedence: ENV var_* > default.vars > built-in defaults +# - Maps var_verbose → VERBOSE +# - Calls base_settings "$VERBOSE" and echo_default +# ------------------------------------------------------------------------------ +default_var_settings() { + # Allowed var_* keys (alphabetically sorted) + 
# Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) + local VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl + var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu + var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged + var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + ) + + # Snapshot: environment variables (highest precedence) + declare -A _HARD_ENV=() + local _k + for _k in "${VAR_WHITELIST[@]}"; do + if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi + done + + # Find default.vars location + local _find_default_vars + _find_default_vars() { + local f + for f in \ + /usr/local/community-scripts/default.vars \ + "$HOME/.config/community-scripts/default.vars" \ + "./default.vars"; do + [ -f "$f" ] && { + echo "$f" + return 0 + } + done + return 1 + } + # Allow override of storages via env (for non-interactive use cases) + [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage" + [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage" + + # Create once, with storages already selected, no var_ctid/var_hostname lines + local _ensure_default_vars + _ensure_default_vars() { + _find_default_vars >/dev/null 2>&1 && return 0 + + local canonical="/usr/local/community-scripts/default.vars" + # Silent creation - no msg_info output + mkdir -p /usr/local/community-scripts + + # Pick storages before writing the file (always ask unless only one) + # Create a minimal temp file to write into + : >"$canonical" + + # Base content (no var_ctid / var_hostname here) + cat >"$canonical" <<'EOF' +# Community-Scripts defaults (var_* only). Lines starting with # are comments. +# Precedence: ENV var_* > default.vars > built-ins. +# Keep keys alphabetically sorted. 
+ +# Container type +var_unprivileged=1 + +# Resources +var_cpu=1 +var_disk=4 +var_ram=1024 + +# Network +var_brg=vmbr0 +var_net=dhcp +var_ipv6_method=none +# var_gateway= +# var_vlan= +# var_mtu= +# var_mac= +# var_ns= + +# SSH +var_ssh=no +# var_ssh_authorized_key= + +# APT cacher (optional - with example) +# var_apt_cacher=yes +# var_apt_cacher_ip=192.168.1.10 + +# Features/Tags/verbosity +var_fuse=no +var_tun=no + +# Advanced Settings (Proxmox-official features) +var_nesting=1 # Allow nesting (required for Docker/LXC in CT) +var_keyctl=0 # Allow keyctl() - needed for Docker (systemd-networkd workaround) +var_mknod=0 # Allow device node creation (requires kernel 5.3+, experimental) +var_mount_fs= # Allow specific filesystems: nfs,fuse,ext4,etc (leave empty for defaults) +var_protection=no # Prevent accidental deletion of container +var_timezone= # Container timezone (e.g. Europe/Berlin, leave empty for host timezone) +var_tags=community-script +var_verbose=no + +# Security (root PW) – empty => autologin +# var_pw= +EOF + + # Now choose storages (always prompt unless just one exists) + choose_and_set_storage_for_file "$canonical" template + choose_and_set_storage_for_file "$canonical" container + + chmod 0644 "$canonical" + # Silent creation - no output message + } + + # Whitelist check + local _is_whitelisted_key + _is_whitelisted_key() { + local k="$1" + local w + for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done + return 1 + } + + # 1) Ensure file exists + _ensure_default_vars + + # 2) Load file + local dv + dv="$(_find_default_vars)" || { + msg_error "default.vars not found after ensure step" + return 1 + } + load_vars_file "$dv" + + # 3) Map var_verbose → VERBOSE + if [[ -n "${var_verbose:-}" ]]; then + case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac + else + VERBOSE="no" + fi + + # 4) Apply base settings and show summary + METHOD="mydefaults-global" 
+ base_settings "$VERBOSE" + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using User Defaults (default.vars) on node $PVEHOST_NAME${CL}" + echo_default +} + +# ------------------------------------------------------------------------------ +# get_app_defaults_path() +# +# - Returns full path for app-specific defaults file +# - Example: /usr/local/community-scripts/defaults/.vars +# ------------------------------------------------------------------------------ + +get_app_defaults_path() { + local n="${NSAPP:-${APP,,}}" + echo "/usr/local/community-scripts/defaults/${n}.vars" +} + +# ------------------------------------------------------------------------------ +# maybe_offer_save_app_defaults +# +# - Called after advanced_settings returned with fully chosen values. +# - If no .vars exists, offers to persist current advanced settings +# into /usr/local/community-scripts/defaults/.vars +# - Only writes whitelisted var_* keys. +# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc. +# ------------------------------------------------------------------------------ +if ! 
declare -p VAR_WHITELIST >/dev/null 2>&1; then + # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) + declare -ag VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu + var_gateway var_hostname var_ipv6_method var_mac var_mtu + var_net var_ns var_os var_pw var_ram var_tags var_tun var_unprivileged + var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + ) +fi + +# Global whitelist check function (used by _load_vars_file_to_map and others) +_is_whitelisted_key() { + local k="$1" + local w + for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done + return 1 +} + +_sanitize_value() { + # Disallow Command-Substitution / Shell-Meta + case "$1" in + *'$('* | *'`'* | *';'* | *'&'* | *'<('*) + echo "" + return 0 + ;; + esac + echo "$1" +} + +# Map-Parser: read var_* from file into _VARS_IN associative array +# Note: Main _load_vars_file() with full validation is defined in default_var_settings section +# This simplified version is used specifically for diff operations via _VARS_IN array +declare -A _VARS_IN +_load_vars_file_to_map() { + local file="$1" + [ -f "$file" ] || return 0 + _VARS_IN=() # Clear array + local line key val + while IFS= read -r line || [ -n "$line" ]; do + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [ -z "$line" ] && continue + case "$line" in + \#*) continue ;; + esac + key=$(printf "%s" "$line" | cut -d= -f1) + val=$(printf "%s" "$line" | cut -d= -f2-) + case "$key" in + var_*) + if _is_whitelisted_key "$key"; then + _VARS_IN["$key"]="$val" + fi + ;; + esac + done <"$file" +} + +# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new) +_build_vars_diff() { + local oldf="$1" newf="$2" + local k + local -A OLD=() NEW=() + _load_vars_file_to_map "$oldf" + for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done + 
_load_vars_file_to_map "$newf" + for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done + + local out + out+="# Diff for ${APP} (${NSAPP})\n" + out+="# Old: ${oldf}\n# New: ${newf}\n\n" + + local found_change=0 + + # Changed & Removed + for k in "${!OLD[@]}"; do + if [[ -v NEW["$k"] ]]; then + if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then + out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n" + found_change=1 + fi + else + out+="- ${k}\n - old: ${OLD[$k]}\n" + found_change=1 + fi + done + + # Added + for k in "${!NEW[@]}"; do + if [[ ! -v OLD["$k"] ]]; then + out+="+ ${k}\n + new: ${NEW[$k]}\n" + found_change=1 + fi + done + + if [[ $found_change -eq 0 ]]; then + out+="(No differences)\n" + fi + + printf "%b" "$out" +} + +# Build a temporary .vars file from current advanced settings +_build_current_app_vars_tmp() { + tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)" + + # NET/GW + _net="${NET:-}" + _gate="" + case "${GATE:-}" in + ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;; + esac + + # IPv6 + _ipv6_method="${IPV6_METHOD:-auto}" + _ipv6_static="" + _ipv6_gateway="" + if [ "$_ipv6_method" = "static" ]; then + _ipv6_static="${IPV6_ADDR:-}" + _ipv6_gateway="${IPV6_GATE:-}" + fi + + # MTU/VLAN/MAC + _mtu="" + _vlan="" + _mac="" + case "${MTU:-}" in + ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;; + esac + case "${VLAN:-}" in + ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;; + esac + case "${MAC:-}" in + ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;; + esac + + # DNS / Searchdomain + _ns="" + _searchdomain="" + case "${NS:-}" in + -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;; + esac + case "${SD:-}" in + -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;; + esac + + # SSH / APT / Features + _ssh="${SSH:-no}" + _ssh_auth="${SSH_AUTHORIZED_KEY:-}" + _apt_cacher="${APT_CACHER:-}" + _apt_cacher_ip="${APT_CACHER_IP:-}" + _fuse="${ENABLE_FUSE:-no}" + _tun="${ENABLE_TUN:-no}" + _gpu="${ENABLE_GPU:-no}" 
+ _nesting="${ENABLE_NESTING:-1}" + _keyctl="${ENABLE_KEYCTL:-0}" + _mknod="${ENABLE_MKNOD:-0}" + _mount_fs="${ALLOW_MOUNT_FS:-}" + _protect="${PROTECT_CT:-no}" + _timezone="${CT_TIMEZONE:-}" + _tags="${TAGS:-}" + _verbose="${VERBOSE:-no}" + + # Type / Resources / Identity + _unpriv="${CT_TYPE:-1}" + _cpu="${CORE_COUNT:-1}" + _ram="${RAM_SIZE:-1024}" + _disk="${DISK_SIZE:-4}" + _hostname="${HN:-$NSAPP}" + + # Storage + _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}" + _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}" + + { + echo "# App-specific defaults for ${APP} (${NSAPP})" + echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')" + echo + + echo "var_os=$(_sanitize_value "${var_os:-}")" + echo "var_version=$(_sanitize_value "${var_version:-}")" + echo "var_unprivileged=$(_sanitize_value "$_unpriv")" + echo "var_cpu=$(_sanitize_value "$_cpu")" + echo "var_ram=$(_sanitize_value "$_ram")" + echo "var_disk=$(_sanitize_value "$_disk")" + + [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")" + [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")" + [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")" + [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")" + [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")" + [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")" + [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")" + + [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")" + # var_ipv6_static removed - static IPs are unique, can't be default + + [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")" + [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")" + + [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")" + [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")" + + [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")" + [ -n "$_tun" ] && echo 
"var_tun=$(_sanitize_value "$_tun")" + [ -n "$_gpu" ] && echo "var_gpu=$(_sanitize_value "$_gpu")" + [ -n "$_nesting" ] && echo "var_nesting=$(_sanitize_value "$_nesting")" + [ -n "$_keyctl" ] && echo "var_keyctl=$(_sanitize_value "$_keyctl")" + [ -n "$_mknod" ] && echo "var_mknod=$(_sanitize_value "$_mknod")" + [ -n "$_mount_fs" ] && echo "var_mount_fs=$(_sanitize_value "$_mount_fs")" + [ -n "$_protect" ] && echo "var_protection=$(_sanitize_value "$_protect")" + [ -n "$_timezone" ] && echo "var_timezone=$(_sanitize_value "$_timezone")" + [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")" + [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")" + + [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")" + [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")" + + [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")" + [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")" + } >"$tmpf" + + echo "$tmpf" +} + +# ------------------------------------------------------------------------------ +# maybe_offer_save_app_defaults() +# +# - Called after advanced_settings() +# - Offers to save current values as app defaults if not existing +# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel +# ------------------------------------------------------------------------------ +maybe_offer_save_app_defaults() { + local app_vars_path + app_vars_path="$(get_app_defaults_path)" + + # always build from current settings + local new_tmp diff_tmp + new_tmp="$(_build_current_app_vars_tmp)" + diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX")" + + # 1) if no file → offer to create + if [[ ! 
-f "$app_vars_path" ]]; then + if whiptail --backtitle "Proxmox VE Helper Scripts" \ + --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then + mkdir -p "$(dirname "$app_vars_path")" + install -m 0644 "$new_tmp" "$app_vars_path" + msg_ok "Saved app defaults: ${app_vars_path}" + fi + rm -f "$new_tmp" "$diff_tmp" + return 0 + fi + + # 2) if file exists → build diff + _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp" + + # if no differences → do nothing + if grep -q "^(No differences)$" "$diff_tmp"; then + rm -f "$new_tmp" "$diff_tmp" + return 0 + fi + + # 3) if file exists → show menu with default selection "Update Defaults" + local app_vars_file + app_vars_file="$(basename "$app_vars_path")" + + while true; do + local sel + sel="$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "APP DEFAULTS – ${APP}" \ + --menu "Differences detected. What do you want to do?" 20 78 10 \ + "Update Defaults" "Write new values to ${app_vars_file}" \ + "Keep Current" "Keep existing defaults (no changes)" \ + "View Diff" "Show a detailed diff" \ + "Cancel" "Abort without changes" \ + --default-item "Update Defaults" \ + 3>&1 1>&2 2>&3)" || { sel="Cancel"; } + + case "$sel" in + "Update Defaults") + install -m 0644 "$new_tmp" "$app_vars_path" + msg_ok "Updated app defaults: ${app_vars_path}" + break + ;; + "Keep Current") + msg_custom "ℹ️" "${BL}" "Keeping current app defaults: ${app_vars_path}" + break + ;; + "View Diff") + whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "Diff – ${APP}" \ + --scrolltext --textbox "$diff_tmp" 25 100 + ;; + "Cancel" | *) + msg_custom "🚫" "${YW}" "Canceled. No changes to app defaults." 
      break
      ;;
    esac
  done

  # Temp files are removed on every exit path of the menu loop.
  rm -f "$new_tmp" "$diff_tmp"
}

# Resolve TEMPLATE_STORAGE / CONTAINER_STORAGE from a saved .vars file,
# prompting the user (via choose_and_set_storage_for_file) for any value
# that is missing from the file.
ensure_storage_selection_for_vars_file() {
  local vf="$1"

  # Read stored values (if any)
  local tpl ct
  tpl=$(grep -E '^var_template_storage=' "$vf" | cut -d= -f2-)
  ct=$(grep -E '^var_container_storage=' "$vf" | cut -d= -f2-)

  if [[ -n "$tpl" && -n "$ct" ]]; then
    TEMPLATE_STORAGE="$tpl"
    CONTAINER_STORAGE="$ct"

    # Validate storage space for loaded container storage
    if [[ -n "${DISK_SIZE:-}" ]]; then
      validate_storage_space "$ct" "$DISK_SIZE" "yes"
      # Continue even if validation fails - user was warned
    fi

    return 0
  fi

  # One or both values missing → interactively select and persist them.
  choose_and_set_storage_for_file "$vf" template
  choose_and_set_storage_for_file "$vf" container

  # Silent operation - no output message
}

# Create /usr/local/community-scripts/default.vars if absent and echo its path.
ensure_global_default_vars_file() {
  local vars_path="/usr/local/community-scripts/default.vars"
  if [[ ! -f "$vars_path" ]]; then
    mkdir -p "$(dirname "$vars_path")"
    touch "$vars_path"
  fi
  echo "$vars_path"
}

# ==============================================================================
# SECTION 6: ADVANCED INTERACTIVE CONFIGURATION
# ==============================================================================

# ------------------------------------------------------------------------------
# advanced_settings()
#
# - Interactive wizard-style configuration with BACK navigation
# - State-machine approach: each step can go forward or backward
# - Cancel at Step 1 = Exit Script, Cancel at other steps = Go Back
# - Allows user to customize all container settings
# ------------------------------------------------------------------------------
advanced_settings() {
  # Enter alternate screen buffer to prevent flicker between dialogs
  tput smcup 2>/dev/null || true
  # Restore the normal screen buffer on any return path.
  trap 'tput rmcup 2>/dev/null || true' RETURN

  # Initialize defaults
  TAGS="community-script;${var_tags:-}"
  local STEP=1
  local MAX_STEP=28

  # Store values for back navigation - inherit from var_* app defaults
  local
_ct_type="${var_unprivileged:-1}" + local _pw="" + local _pw_display="Automatic Login" + local _ct_id="$NEXTID" + local _hostname="$NSAPP" + local _disk_size="${var_disk:-4}" + local _core_count="${var_cpu:-1}" + local _ram_size="${var_ram:-1024}" + local _bridge="${var_brg:-vmbr0}" + local _net="${var_net:-dhcp}" + local _gate="${var_gateway:-}" + local _ipv6_method="${var_ipv6_method:-auto}" + local _ipv6_addr="" + local _ipv6_gate="" + local _apt_cacher="${var_apt_cacher:-no}" + local _apt_cacher_ip="${var_apt_cacher_ip:-}" + local _mtu="${var_mtu:-}" + local _sd="${var_searchdomain:-}" + local _ns="${var_ns:-}" + local _mac="${var_mac:-}" + local _vlan="${var_vlan:-}" + local _tags="$TAGS" + local _enable_fuse="${var_fuse:-no}" + local _enable_tun="${var_tun:-no}" + local _enable_gpu="${var_gpu:-no}" + local _enable_nesting="${var_nesting:-1}" + local _verbose="${var_verbose:-no}" + local _enable_keyctl="${var_keyctl:-0}" + local _enable_mknod="${var_mknod:-0}" + local _mount_fs="${var_mount_fs:-}" + local _protect_ct="${var_protection:-no}" + + # Detect host timezone for default (if not set via var_timezone) + local _host_timezone="" + if command -v timedatectl >/dev/null 2>&1; then + _host_timezone=$(timedatectl show --value --property=Timezone 2>/dev/null || echo "") + elif [ -f /etc/timezone ]; then + _host_timezone=$(cat /etc/timezone 2>/dev/null || echo "") + fi + # Map Etc/* timezones to "host" (pct doesn't accept Etc/* zones) + [[ "${_host_timezone:-}" == Etc/* ]] && _host_timezone="host" + local _ct_timezone="${var_timezone:-$_host_timezone}" + [[ "${_ct_timezone:-}" == Etc/* ]] && _ct_timezone="host" + + # Helper to show current progress + show_progress() { + local current=$1 + local total=$MAX_STEP + echo -e "\n${INFO}${BOLD}${DGN}Step $current of $total${CL}" + } + + # Detect available bridges (do this once) + local BRIDGES="" + local BRIDGE_MENU_OPTIONS=() + _detect_bridges() { + IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find 
"/etc/network/interfaces.d/" -type f 2>/dev/null) + BRIDGES="" + local OLD_IFS=$IFS + IFS=$'\n' + for iface_filepath in ${IFACE_FILEPATH_LIST}; do + local iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX') + (grep -Pn '^\s*iface' "${iface_filepath}" 2>/dev/null | cut -d':' -f1 && wc -l "${iface_filepath}" 2>/dev/null | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" 2>/dev/null || true + if [ -f "${iface_indexes_tmpfile}" ]; then + while read -r pair; do + local start=$(echo "${pair}" | cut -d':' -f1) + local end=$(echo "${pair}" | cut -d':' -f2) + if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" 2>/dev/null | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then + local iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}') + BRIDGES="${iface_name}"$'\n'"${BRIDGES}" + fi + done <"${iface_indexes_tmpfile}" + rm -f "${iface_indexes_tmpfile}" + fi + done + IFS=$OLD_IFS + BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq) + + # Build bridge menu + BRIDGE_MENU_OPTIONS=() + if [[ -n "$BRIDGES" ]]; then + while IFS= read -r bridge; do + if [[ -n "$bridge" ]]; then + local description=$(grep -A 10 "iface $bridge" /etc/network/interfaces 2>/dev/null | grep '^#' | head -n1 | sed 's/^#\s*//;s/^[- ]*//') + BRIDGE_MENU_OPTIONS+=("$bridge" "${description:- }") + fi + done <<<"$BRIDGES" + fi + } + _detect_bridges + + # Main wizard loop + while [ $STEP -le $MAX_STEP ]; do + case $STEP in + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 1: Container Type + # ═══════════════════════════════════════════════════════════════════════════ + 1) + local default_on="ON" + local default_off="OFF" + [[ "$_ct_type" == "0" ]] && { + default_on="OFF" + default_off="ON" + } + + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER TYPE" \ + --ok-button "Next" --cancel-button 
"Exit" \ + --radiolist "\nChoose container type:\n\nUse SPACE to select, ENTER to confirm." 14 58 2 \ + "1" "Unprivileged (recommended)" $default_on \ + "0" "Privileged" $default_off \ + 3>&1 1>&2 2>&3); then + [[ -n "$result" ]] && _ct_type="$result" + ((STEP++)) + else + exit_script + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 2: Root Password + # ════════════════════════════════════════���═══════════════════════════════���══ + 2) + if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "ROOT PASSWORD" \ + --ok-button "Next" --cancel-button "Back" \ + --passwordbox "\nSet Root Password (needed for root ssh access)\n\nLeave blank for automatic login (no password)" 12 58 \ + 3>&1 1>&2 2>&3); then + + if [[ -z "$PW1" ]]; then + _pw="" + _pw_display="Automatic Login" + ((STEP++)) + elif [[ "$PW1" == *" "* ]]; then + whiptail --msgbox "Password cannot contain spaces." 8 58 + else + local _pw1_clean="$PW1" + while [[ "$_pw1_clean" == -* ]]; do + _pw1_clean="${_pw1_clean#-}" + done + if [[ -z "$_pw1_clean" ]]; then + whiptail --msgbox "Password cannot be only '-' characters." 8 58 + continue + elif ((${#_pw1_clean} < 5)); then + whiptail --msgbox "Password must be at least 5 characters (after removing leading '-')." 8 70 + continue + fi + # Verify password + if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "PASSWORD VERIFICATION" \ + --ok-button "Confirm" --cancel-button "Back" \ + --passwordbox "\nVerify Root Password" 10 58 \ + 3>&1 1>&2 2>&3); then + local _pw2_clean="$PW2" + while [[ "$_pw2_clean" == -* ]]; do + _pw2_clean="${_pw2_clean#-}" + done + if [[ "$_pw1_clean" == "$_pw2_clean" ]]; then + _pw="--password $_pw1_clean" + _pw_display="********" + ((STEP++)) + else + whiptail --msgbox "Passwords do not match. Please try again." 
8 58 + fi + else + ((STEP--)) + fi + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 3: Container ID + # ═══════════════════════════════════════════════════════════════════════════ + 3) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER ID" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Container ID" 10 58 "$_ct_id" \ + 3>&1 1>&2 2>&3); then + local input_id="${result:-$NEXTID}" + + # Validate that ID is numeric + if ! [[ "$input_id" =~ ^[0-9]+$ ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "Invalid ID" --msgbox "Container ID must be numeric." 8 58 + continue + fi + + # Check if ID is already in use + if ! validate_container_id "$input_id"; then + if whiptail --backtitle "Proxmox VE Helper Scripts" --title "ID Already In Use" \ + --yesno "Container/VM ID $input_id is already in use.\n\nWould you like to use the next available ID ($(get_valid_container_id "$input_id"))?" 10 58; then + _ct_id=$(get_valid_container_id "$input_id") + else + continue + fi + else + _ct_id="$input_id" + fi + ((STEP++)) + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 4: Hostname + # ═══════════════════════════════════════════════════════════════════════════ + 4) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "HOSTNAME" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Hostname (or FQDN, e.g. 
host.example.com)" 10 58 "$_hostname" \ + 3>&1 1>&2 2>&3); then + local hn_test="${result:-$NSAPP}" + hn_test=$(echo "${hn_test,,}" | tr -d ' ') + + if validate_hostname "$hn_test"; then + _hostname="$hn_test" + ((STEP++)) + else + whiptail --msgbox "Invalid hostname: '$hn_test'\n\nRules:\n- Only lowercase letters, digits, dots and hyphens\n- Labels separated by dots (max 63 chars each)\n- No leading/trailing hyphens or dots\n- No consecutive dots\n- Total max 253 characters" 14 60 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 5: Disk Size + # ═══════════════════════════════════════════════════════════════════════════ + 5) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DISK SIZE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Disk Size in GB" 10 58 "$_disk_size" \ + 3>&1 1>&2 2>&3); then + local disk_test="${result:-$var_disk}" + if [[ "$disk_test" =~ ^[1-9][0-9]*$ ]]; then + _disk_size="$disk_test" + ((STEP++)) + else + whiptail --msgbox "Disk size must be a positive integer!" 8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 6: CPU Cores + # ═══════════════════════════════════════════════════════════════════════════ + 6) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CPU CORES" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nAllocate CPU Cores" 10 58 "$_core_count" \ + 3>&1 1>&2 2>&3); then + local cpu_test="${result:-$var_cpu}" + if [[ "$cpu_test" =~ ^[1-9][0-9]*$ ]]; then + _core_count="$cpu_test" + ((STEP++)) + else + whiptail --msgbox "CPU core count must be a positive integer!" 
8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 7: RAM Size + # ═══════════════════════════════════════════════════════════════════════════ + 7) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "RAM SIZE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nAllocate RAM in MiB" 10 58 "$_ram_size" \ + 3>&1 1>&2 2>&3); then + local ram_test="${result:-$var_ram}" + if [[ "$ram_test" =~ ^[1-9][0-9]*$ ]]; then + _ram_size="$ram_test" + ((STEP++)) + else + whiptail --msgbox "RAM size must be a positive integer!" 8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 8: Network Bridge + # ═══════════════════════════════════════════════════════════════════════════ + 8) + if [[ ${#BRIDGE_MENU_OPTIONS[@]} -eq 0 ]]; then + # Validate default bridge exists + if validate_bridge "vmbr0"; then + _bridge="vmbr0" + ((STEP++)) + else + whiptail --msgbox "Default bridge 'vmbr0' not found!\n\nPlease configure a network bridge in Proxmox first." 10 58 + msg_error "Default bridge 'vmbr0' not found" + exit 116 + fi + else + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "NETWORK BRIDGE" \ + --ok-button "Next" --cancel-button "Back" \ + --menu "\nSelect network bridge:" 16 58 6 \ + "${BRIDGE_MENU_OPTIONS[@]}" \ + 3>&1 1>&2 2>&3); then + local bridge_test="${result:-vmbr0}" + # Skip separator entries (e.g., __other__) - re-display menu + if [[ "$bridge_test" == "__other__" || "$bridge_test" == -* ]]; then + continue + fi + if validate_bridge "$bridge_test"; then + _bridge="$bridge_test" + ((STEP++)) + else + whiptail --msgbox "Bridge '$bridge_test' is not available or not active." 
8 58 + fi + else + ((STEP--)) + fi + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 9: IPv4 Configuration + # ═══════════════════════════════════════════════════════════════════════════ + 9) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "IPv4 CONFIGURATION" \ + --ok-button "Next" --cancel-button "Back" \ + --menu "\nSelect IPv4 Address Assignment:" 16 65 3 \ + "dhcp" "Automatic (DHCP, recommended)" \ + "static" "Static (manual entry)" \ + "range" "IP Range Scan (find first free IP)" \ + 3>&1 1>&2 2>&3); then + + if [[ "$result" == "static" ]]; then + # Get static IP + local static_ip + if static_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "STATIC IPv4 ADDRESS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nEnter Static IPv4 CIDR Address\n(e.g. 192.168.1.100/24)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if validate_ip_address "$static_ip"; then + # Get gateway + local gateway_ip + if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "GATEWAY IP" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nEnter Gateway IP address" 10 58 "" \ + 3>&1 1>&2 2>&3); then + if validate_gateway_ip "$gateway_ip"; then + # Validate gateway is in same subnet + if validate_gateway_in_subnet "$static_ip" "$gateway_ip"; then + _net="$static_ip" + _gate=",gw=$gateway_ip" + ((STEP++)) + else + whiptail --msgbox "Gateway is not in the same subnet as the static IP.\n\nStatic IP: $static_ip\nGateway: $gateway_ip" 10 58 + fi + else + whiptail --msgbox "Invalid Gateway IP format.\n\nEach octet must be 0-255.\nExample: 192.168.1.1" 10 58 + fi + fi + else + whiptail --msgbox "Invalid IPv4 CIDR format.\n\nEach octet must be 0-255.\nCIDR must be 1-32.\nExample: 192.168.1.100/24" 12 58 + fi + fi + elif [[ "$result" == "range" ]]; then + # IP Range Scan + local ip_range + if 
ip_range=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "IP RANGE SCAN" \ + --ok-button "Scan" --cancel-button "Back" \ + --inputbox "\nEnter IP range to scan for free address\n(e.g. 192.168.1.100/24-192.168.1.200/24)" 12 65 "" \ + 3>&1 1>&2 2>&3); then + if is_ip_range "$ip_range"; then + # Exit whiptail screen temporarily to show scan progress + clear + header_info + echo -e "${INFO}${BOLD}${DGN}Scanning IP range for free address...${CL}\n" + if resolve_ip_from_range "$ip_range"; then + # Get gateway + local gateway_ip + if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "GATEWAY IP" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nFound free IP: $NET_RESOLVED\n\nEnter Gateway IP address" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if validate_gateway_ip "$gateway_ip"; then + # Validate gateway is in same subnet + if validate_gateway_in_subnet "$NET_RESOLVED" "$gateway_ip"; then + _net="$NET_RESOLVED" + _gate=",gw=$gateway_ip" + ((STEP++)) + else + whiptail --msgbox "Gateway is not in the same subnet as the IP.\n\nIP: $NET_RESOLVED\nGateway: $gateway_ip" 10 58 + fi + else + whiptail --msgbox "Invalid Gateway IP format.\n\nEach octet must be 0-255.\nExample: 192.168.1.1" 10 58 + fi + fi + else + whiptail --msgbox "No free IP found in the specified range.\nAll IPs responded to ping." 
10 58 + fi + else + whiptail --msgbox "Invalid IP range format.\n\nExample: 192.168.1.100/24-192.168.1.200/24" 10 58 + fi + fi + else + _net="dhcp" + _gate="" + ((STEP++)) + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 10: IPv6 Configuration + # ═══════════════════════════════════════════════════════════════════════════ + 10) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "IPv6 CONFIGURATION" \ + --ok-button "Next" --cancel-button "Back" \ + --menu "\nSelect IPv6 Address Management:" 16 70 5 \ + "auto" "SLAAC/AUTO (recommended) - Dynamic IPv6 from network" \ + "dhcp" "DHCPv6 - DHCP-assigned IPv6 address" \ + "static" "Static - Manual IPv6 address configuration" \ + "none" "None - No IPv6 assignment (most containers)" \ + "disable" "Fully Disabled - (breaks some services)" \ + 3>&1 1>&2 2>&3); then + + _ipv6_method="$result" + case "$result" in + static) + local ipv6_addr + if ipv6_addr=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "STATIC IPv6 ADDRESS" \ + --inputbox "\nEnter IPv6 CIDR address\n(e.g. 
2001:db8::1/64)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if validate_ipv6_address "$ipv6_addr"; then + _ipv6_addr="$ipv6_addr" + # Optional gateway - loop until valid or empty + local ipv6_gw_valid=false + while [[ "$ipv6_gw_valid" == "false" ]]; do + local ipv6_gw + ipv6_gw=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "IPv6 GATEWAY" \ + --inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \ + 3>&1 1>&2 2>&3) || true + # Validate gateway if provided + if [[ -n "$ipv6_gw" ]]; then + if validate_ipv6_address "$ipv6_gw"; then + _ipv6_gate="$ipv6_gw" + ipv6_gw_valid=true + ((STEP++)) + else + whiptail --msgbox "Invalid IPv6 gateway format.\n\nExample: 2001:db8::1" 8 58 + fi + else + _ipv6_gate="" + ipv6_gw_valid=true + ((STEP++)) + fi + done + else + whiptail --msgbox "Invalid IPv6 CIDR format.\n\nExample: 2001:db8::1/64\nCIDR must be 1-128." 10 58 + fi + fi + ;; + dhcp) + _ipv6_addr="dhcp" + _ipv6_gate="" + ((STEP++)) + ;; + + none) + _ipv6_addr="none" + _ipv6_gate="" + ((STEP++)) + ;; + *) + _ipv6_addr="" + _ipv6_gate="" + ((STEP++)) + ;; + esac + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 11: MTU Size + # ═══════════════════════════════════════════════════════════════════════════ + 11) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "MTU SIZE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Interface MTU Size\n(leave blank for default 1500, common values: 1500, 9000)" 12 62 "" \ + 3>&1 1>&2 2>&3); then + if validate_mtu "$result"; then + _mtu="$result" + ((STEP++)) + else + whiptail --msgbox "Invalid MTU size.\n\nMTU must be between 576 and 65535.\nCommon values: 1500 (default), 9000 (jumbo frames)" 10 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 12: DNS Search Domain + # 
═══════════════════════════════════════════════════════════════════════════ + 12) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DNS SEARCH DOMAIN" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet DNS Search Domain\n(leave blank to use host setting)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _sd="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 13: DNS Server + # ═══════════════════════════════════════════════════════════════════════════ + 13) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DNS SERVER" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet DNS Server IP\n(leave blank to use host setting)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _ns="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 14: MAC Address + # ═══════════════════════════════════════════════════════════════════════════ + 14) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "MAC ADDRESS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet MAC Address\n(leave blank for auto-generated, format: XX:XX:XX:XX:XX:XX)" 12 62 "" \ + 3>&1 1>&2 2>&3); then + if validate_mac_address "$result"; then + _mac="$result" + ((STEP++)) + else + whiptail --msgbox "Invalid MAC address format.\n\nRequired format: XX:XX:XX:XX:XX:XX\nExample: 02:00:00:00:00:01" 10 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 15: VLAN Tag + # ═══════════════════════════════════════════════════════════════════════════ + 15) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "VLAN TAG" \ + --ok-button "Next" --cancel-button "Back" \ + 
--inputbox "\nSet VLAN Tag (1-4094)\n(leave blank for no VLAN)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if validate_vlan_tag "$result"; then + _vlan="$result" + ((STEP++)) + else + whiptail --msgbox "Invalid VLAN tag.\n\nVLAN must be a number between 1 and 4094." 8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 16: Tags + # ═══════════════════════════════════════════════════════════════════════════ + 16) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER TAGS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Custom Tags (semicolon-separated)\n(alphanumeric, hyphens, underscores only)" 12 58 "$_tags" \ + 3>&1 1>&2 2>&3); then + local tags_test="${result:-}" + tags_test=$(echo "$tags_test" | tr -d '[:space:]') + if validate_tags "$tags_test"; then + _tags="$tags_test" + ((STEP++)) + else + whiptail --msgbox "Invalid tag format.\n\nTags can only contain:\n- Letters (a-z, A-Z)\n- Numbers (0-9)\n- Hyphens (-)\n- Underscores (_)\n- Semicolons (;) as separator" 14 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 17: SSH Settings + # ═══════════════════════════════════════════════════════════════════════════ + 17) + configure_ssh_settings "Step $STEP/$MAX_STEP" + # configure_ssh_settings handles its own flow, always advance + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 18: FUSE Support + # ═══════════════════════════════════════════════════════════════════════════ + 18) + local fuse_default_flag="--defaultno" + [[ "$_enable_fuse" == "yes" || "$_enable_fuse" == "1" ]] && fuse_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "FUSE SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $fuse_default_flag \ + --yesno 
"\nEnable FUSE support?\n\nRequired for: rclone, mergerfs, AppImage, etc.\n\n(App default: ${var_fuse:-no})" 14 58; then + _enable_fuse="yes" + else + if [ $? -eq 1 ]; then + _enable_fuse="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 19: TUN/TAP Support + # ═══════════════════════════════════════════════════════════════════════════ + 19) + local tun_default_flag="--defaultno" + [[ "$_enable_tun" == "yes" || "$_enable_tun" == "1" ]] && tun_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "TUN/TAP SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $tun_default_flag \ + --yesno "\nEnable TUN/TAP device support?\n\nRequired for: VPN apps (WireGuard, OpenVPN, Tailscale),\nnetwork tunneling, and containerized networking.\n\n(App default: ${var_tun:-no})" 14 62; then + _enable_tun="yes" + else + if [ $? -eq 1 ]; then + _enable_tun="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 20: Nesting Support + # ═══════════════════════════════════════════════════════════════════════════ + 20) + local nesting_default_flag="" + [[ "$_enable_nesting" == "0" || "$_enable_nesting" == "no" ]] && nesting_default_flag="--defaultno" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "NESTING SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $nesting_default_flag \ + --yesno "\nEnable Nesting?\n\nRequired for: Docker, LXC inside LXC, Podman,\nand other containerization tools.\n\n(App default: ${var_nesting:-1})" 14 58; then + _enable_nesting="1" + else + if [ $? 
-eq 1 ]; then + _enable_nesting="0" + # Warn about potential issues with systemd-based OS when nesting is disabled + if [[ "$var_os" != "alpine" ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "⚠️ NESTING WARNING" \ + --msgbox "Modern systemd-based distributions (Debian 13+, Ubuntu 24.04+, etc.) may require nesting to be enabled for proper operation.\n\nWithout nesting, the container may start in a degraded state with failing services (error 243/CREDENTIALS).\n\nIf you experience issues, enable nesting in the container options." 14 68 + fi + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 21: GPU Passthrough + # ═══════════════════════════════════════════════════════════════════════════ + 21) + local gpu_default_flag="--defaultno" + [[ "$_enable_gpu" == "yes" ]] && gpu_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "GPU PASSTHROUGH" \ + --ok-button "Next" --cancel-button "Back" \ + $gpu_default_flag \ + --yesno "\nEnable GPU Passthrough?\n\nAutomatically detects and passes through available GPUs\n(Intel/AMD/NVIDIA) for hardware acceleration.\n\nRecommended for: Media servers, AI/ML, Transcoding\n\n(App default: ${var_gpu:-no})" 16 62; then + _enable_gpu="yes" + else + if [ $? 
-eq 1 ]; then + _enable_gpu="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 22: Keyctl Support (Docker/systemd) + # ═══════════════════════════════════════════════════════════════════════════ + 22) + local keyctl_default_flag="--defaultno" + [[ "$_enable_keyctl" == "1" ]] && keyctl_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "KEYCTL SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $keyctl_default_flag \ + --yesno "\nEnable Keyctl support?\n\nRequired for: Docker containers, systemd-networkd,\nand kernel keyring operations.\n\nNote: Automatically enabled for unprivileged containers.\n\n(App default: ${var_keyctl:-0})" 16 62; then + _enable_keyctl="1" + else + if [ $? -eq 1 ]; then + _enable_keyctl="0" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 23: APT Cacher Proxy + # ═══════════════════════════════════════════════════════════════════════════ + 23) + local apt_cacher_default_flag="--defaultno" + [[ "$_apt_cacher" == "yes" ]] && apt_cacher_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "APT CACHER PROXY" \ + --ok-button "Next" --cancel-button "Back" \ + $apt_cacher_default_flag \ + --yesno "\nUse APT Cacher-NG proxy?\n\nSpeeds up package downloads by caching them locally.\nRequires apt-cacher-ng running on your network.\n\n(App default: ${var_apt_cacher:-no})" 14 62; then + _apt_cacher="yes" + # Ask for IP if enabled + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "APT CACHER IP" \ + --inputbox "\nEnter APT Cacher-NG server IP address:" 10 58 "$_apt_cacher_ip" \ + 3>&1 1>&2 2>&3); then + _apt_cacher_ip="$result" + fi + else + if [ $? 
-eq 1 ]; then + _apt_cacher="no" + _apt_cacher_ip="" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 24: Container Timezone + # ═══════════════════════════════════════════════════════════════════════════ + 24) + local tz_hint="$_ct_timezone" + [[ -z "$tz_hint" ]] && tz_hint="(empty - will use host timezone)" + + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER TIMEZONE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet container timezone.\n\nExamples: Europe/Berlin, America/New_York, Asia/Tokyo\n\nHost timezone: ${_host_timezone:-unknown}\n\nLeave empty to inherit from host." 16 62 "$_ct_timezone" \ + 3>&1 1>&2 2>&3); then + local tz_test="$result" + [[ "${tz_test:-}" == Etc/* ]] && tz_test="host" # pct doesn't accept Etc/* zones + if validate_timezone "$tz_test"; then + _ct_timezone="$tz_test" + ((STEP++)) + else + whiptail --msgbox "Invalid timezone: '$result'\n\nTimezone must exist in /usr/share/zoneinfo/\n\nExamples:\n- Europe/Berlin\n- America/New_York\n- Asia/Tokyo\n- UTC" 14 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 25: Container Protection + # ═══════════════════════════════════════════════════════════════════════════ + 25) + local protect_default_flag="--defaultno" + [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && protect_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER PROTECTION" \ + --ok-button "Next" --cancel-button "Back" \ + $protect_default_flag \ + --yesno "\nEnable Container Protection?\n\nPrevents accidental deletion of this container.\nYou must disable protection before removing.\n\n(App default: ${var_protection:-no})" 14 62; then + _protect_ct="yes" + else + if [ $? 
-eq 1 ]; then + _protect_ct="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 26: Device Node Creation (mknod) + # ═══════════════════════════════════════════════════════════════════════════ + 26) + local mknod_default_flag="--defaultno" + [[ "$_enable_mknod" == "1" ]] && mknod_default_flag="" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DEVICE NODE CREATION" \ + --ok-button "Next" --cancel-button "Back" \ + $mknod_default_flag \ + --yesno "\nAllow device node creation (mknod)?\n\nRequired for: Creating device files inside container.\nExperimental feature (requires kernel 5.3+).\n\n(App default: ${var_mknod:-0})" 14 62; then + _enable_mknod="1" + else + if [ $? -eq 1 ]; then + _enable_mknod="0" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 27: Mount Filesystems + # ═══════════════════════════════════════════════════════════════════════════ + 27) + local mount_hint="" + [[ -n "$_mount_fs" ]] && mount_hint="$_mount_fs" || mount_hint="(none)" + + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "MOUNT FILESYSTEMS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nAllow specific filesystem mounts.\n\nComma-separated list: nfs, cifs, fuse, ext4, etc.\nLeave empty for defaults (none).\n\nCurrent: $mount_hint" 14 62 "$_mount_fs" \ + 3>&1 1>&2 2>&3); then + _mount_fs="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 28: Verbose Mode & Confirmation + # ═══════════════════════════════════════════════════════════════════════════ + 28) + local verbose_default_flag="--defaultno" + [[ "$_verbose" == "yes" ]] && verbose_default_flag="" + + if whiptail --backtitle "Proxmox VE 
Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "VERBOSE MODE" \ + $verbose_default_flag \ + --yesno "\nEnable Verbose Mode?\n\nShows detailed output during installation." 12 58; then + _verbose="yes" + else + _verbose="no" + fi + # Build summary + local ct_type_desc="Unprivileged" + [[ "$_ct_type" == "0" ]] && ct_type_desc="Privileged" + + local nesting_desc="Disabled" + [[ "$_enable_nesting" == "1" ]] && nesting_desc="Enabled" + + local keyctl_desc="Disabled" + [[ "$_enable_keyctl" == "1" ]] && keyctl_desc="Enabled" + + local protect_desc="No" + [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && protect_desc="Yes" + + local tz_display="${_ct_timezone:-Host TZ}" + local apt_display="${_apt_cacher:-no}" + [[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip" + + local summary="Container Type: $ct_type_desc +Container ID: $_ct_id +Hostname: $_hostname + +Resources: + Disk: ${_disk_size} GB + CPU: $_core_count cores + RAM: $_ram_size MiB + +Network: + Bridge: $_bridge + IPv4: $_net + IPv6: $_ipv6_method + +Features: + FUSE: $_enable_fuse | TUN: $_enable_tun + Nesting: $nesting_desc | Keyctl: $keyctl_desc + GPU: $_enable_gpu | Protection: $protect_desc + +Advanced: + Timezone: $tz_display + APT Cacher: $apt_display + Verbose: $_verbose" + + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONFIRM SETTINGS" \ + --ok-button "Create LXC" --cancel-button "Back" \ + --yesno "$summary\n\nCreate ${APP} LXC with these settings?" 
32 62; then + ((STEP++)) + else + ((STEP--)) + fi + ;; + esac + done + + # ═══════════════════════════════════════════════════════════════════════════ + # Apply all collected values to global variables + # ═══════════════════════════════════════════════════════════════════════════ + CT_TYPE="$_ct_type" + PW="$_pw" + CT_ID="$_ct_id" + HN="$_hostname" + DISK_SIZE="$_disk_size" + CORE_COUNT="$_core_count" + RAM_SIZE="$_ram_size" + BRG="$_bridge" + NET="$_net" + GATE="$_gate" + IPV6_METHOD="$_ipv6_method" + IPV6_ADDR="$_ipv6_addr" + IPV6_GATE="$_ipv6_gate" + TAGS="$_tags" + ENABLE_FUSE="$_enable_fuse" + ENABLE_TUN="$_enable_tun" + ENABLE_GPU="$_enable_gpu" + ENABLE_NESTING="$_enable_nesting" + ENABLE_KEYCTL="$_enable_keyctl" + ENABLE_MKNOD="$_enable_mknod" + ALLOW_MOUNT_FS="$_mount_fs" + PROTECT_CT="$_protect_ct" + CT_TIMEZONE="$_ct_timezone" + APT_CACHER="$_apt_cacher" + APT_CACHER_IP="$_apt_cacher_ip" + VERBOSE="$_verbose" + + # Update var_* based on user choice (for functions that check these) + var_gpu="$_enable_gpu" + var_fuse="$_enable_fuse" + var_tun="$_enable_tun" + var_nesting="$_enable_nesting" + var_keyctl="$_enable_keyctl" + var_mknod="$_enable_mknod" + var_mount_fs="$_mount_fs" + var_protection="$_protect_ct" + var_timezone="$_ct_timezone" + var_apt_cacher="$_apt_cacher" + var_apt_cacher_ip="$_apt_cacher_ip" + + # Format optional values + [[ -n "$_mtu" ]] && MTU=",mtu=$_mtu" || MTU="" + [[ -n "$_sd" ]] && SD="-searchdomain=$_sd" || SD="" + [[ -n "$_ns" ]] && NS="-nameserver=$_ns" || NS="" + [[ -n "$_mac" ]] && MAC=",hwaddr=$_mac" || MAC="" + [[ -n "$_vlan" ]] && VLAN=",tag=$_vlan" || VLAN="" + + # Alpine UDHCPC fix + if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ -n "$_ns" ]; then + UDHCPC_FIX="yes" + else + UDHCPC_FIX="no" + fi + export UDHCPC_FIX + export SSH_KEYS_FILE + + # Exit alternate screen buffer before showing summary (so output remains visible) + tput rmcup 2>/dev/null || true + trap - RETURN + + # Display final summary + echo -e 
"\n${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" + echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}" + echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$([ "$CT_TYPE" == "1" ] && echo "Unprivileged" || echo "Privileged")${CL}" + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + echo -e "${NETWORK}${BOLD}${DGN}IPv4: ${BGN}$NET${CL}" + echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}$IPV6_METHOD${CL}" + echo -e "${FUSE}${BOLD}${DGN}FUSE Support: ${BGN}${ENABLE_FUSE:-no}${CL}" + [[ "${ENABLE_TUN:-no}" == "yes" ]] && echo -e "${NETWORK}${BOLD}${DGN}TUN/TAP Support: ${BGN}$ENABLE_TUN${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Nesting: ${BGN}$([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")${CL}" + [[ "${ENABLE_KEYCTL:-0}" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Keyctl: ${BGN}Enabled${CL}" + echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}${ENABLE_GPU:-no}${CL}" + [[ "${PROTECT_CT:-no}" == "yes" || "${PROTECT_CT:-no}" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Protection: ${BGN}Enabled${CL}" + [[ -n "${CT_TIMEZONE:-}" ]] && echo -e "${INFO}${BOLD}${DGN}Timezone: ${BGN}$CT_TIMEZONE${CL}" + [[ "$APT_CACHER" == "yes" ]] && echo -e "${INFO}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}" + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" + echo -e "${CREATING}${BOLD}${RD}Creating an LXC of ${APP} using the above advanced settings${CL}" + + # Log settings to file + log_section "CONTAINER SETTINGS (ADVANCED) - ${APP}" + log_msg 
"Application: ${APP}" + log_msg "PVE Version: ${PVEVERSION} (Kernel: ${KERNEL_VERSION})" + log_msg "Operating System: $var_os ($var_version)" + log_msg "Container Type: $([ "$CT_TYPE" == "1" ] && echo "Unprivileged" || echo "Privileged")" + log_msg "Container ID: $CT_ID" + log_msg "Hostname: $HN" + log_msg "Disk Size: ${DISK_SIZE} GB" + log_msg "CPU Cores: $CORE_COUNT" + log_msg "RAM Size: ${RAM_SIZE} MiB" + log_msg "Bridge: $BRG" + log_msg "IPv4: $NET" + log_msg "IPv6: $IPV6_METHOD" + log_msg "FUSE Support: ${ENABLE_FUSE:-no}" + log_msg "Nesting: $([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")" + log_msg "GPU Passthrough: ${ENABLE_GPU:-no}" + log_msg "Verbose Mode: $VERBOSE" + log_msg "Session ID: ${SESSION_ID}" +} + +# ============================================================================== +# SECTION 7: USER INTERFACE & DIAGNOSTICS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# diagnostics_check() +# +# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics +# - Asks user whether to send anonymous diagnostic data (first run only) +# - Saves DIAGNOSTICS=yes/no in the config file +# - Reads current diagnostics setting from existing file +# - Sets global DIAGNOSTICS variable for API telemetry opt-in/out +# ------------------------------------------------------------------------------ +diagnostics_check() { + local config_dir="/usr/local/community-scripts" + local config_file="${config_dir}/diagnostics" + + mkdir -p "$config_dir" + + if [[ -f "$config_file" ]]; then + DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' "$config_file") || true + DIAGNOSTICS="${DIAGNOSTICS:-no}" + return + fi + + local result + result=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "TELEMETRY & DIAGNOSTICS" \ + --ok-button "Confirm" --cancel-button "Exit" \ + --radiolist "\nHelp improve 
Community-Scripts by sharing anonymous data.\n\nWhat we collect:\n - Container resources (CPU, RAM, disk), OS & PVE version\n - Application name, install method and status\n\nWhat we DON'T collect:\n - No IP addresses, hostnames, or personal data\n\nYou can change this anytime in the Settings menu.\nPrivacy: https://github.com/community-scripts/telemetry-service/blob/main/docs/PRIVACY.md\n\nUse SPACE to select, ENTER to confirm." 22 76 2 \ + "yes" "Yes, share anonymous data" OFF \ + "no" "No, opt out" OFF \ + 3>&1 1>&2 2>&3) || result="no" + + DIAGNOSTICS="${result:-no}" + + cat <"$config_file" +DIAGNOSTICS=${DIAGNOSTICS} + +# Community-Scripts Telemetry Configuration +# https://telemetry.community-scripts.org +# +# This file stores your telemetry preference. +# Set DIAGNOSTICS=yes to share anonymous installation data. +# Set DIAGNOSTICS=no to disable telemetry. +# +# You can also change this via the Settings menu during installation. +# +# Data collected (when enabled): +# disk_size, core_count, ram_size, os_type, os_version, +# nsapp, method, pve_version, status, exit_code +# +# No personal data (IPs, hostnames, passwords) is ever collected. +# Privacy: https://github.com/community-scripts/telemetry-service/blob/main/docs/PRIVACY.md +EOF +} + +diagnostics_menu() { + local current="${DIAGNOSTICS:-no}" + local status_text="DISABLED" + [[ "$current" == "yes" ]] && status_text="ENABLED" + + local dialog_text=( + "Telemetry is currently: ${status_text}\n\n" + "Anonymous data helps us improve scripts and track issues.\n" + "No personal data is ever collected.\n\n" + "More info: https://telemetry.community-scripts.org\n\n" + "Do you want to ${current:+change this setting}?" 
+ ) + + if [[ "$current" == "yes" ]]; then + if whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "TELEMETRY SETTINGS" \ + --yesno "${dialog_text[*]}" 14 64 \ + --yes-button "Disable" --no-button "Keep enabled"; then + DIAGNOSTICS="no" + sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics + whiptail --msgbox "Telemetry disabled.\n\nNote: Existing containers keep their current setting.\nNew containers will inherit this choice." 10 58 + fi + else + if whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "TELEMETRY SETTINGS" \ + --yesno "${dialog_text[*]}" 14 64 \ + --yes-button "Enable" --no-button "Keep disabled"; then + DIAGNOSTICS="yes" + sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics + whiptail --msgbox "Telemetry enabled.\n\nNote: Existing containers keep their current setting.\nNew containers will inherit this choice." 10 58 + fi + fi +} + +# ------------------------------------------------------------------------------ +# echo_default() +# +# - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.) 
# - Uses icons and formatting for readability
# - Convert CT_TYPE to description
# - Also logs settings to log file for debugging
# ------------------------------------------------------------------------------
echo_default() {
  # Human-readable container type (CT_TYPE: 0 = privileged, 1 = unprivileged).
  CT_TYPE_DESC="Unprivileged"
  [ "$CT_TYPE" -eq 0 ] && CT_TYPE_DESC="Privileged"

  # On-screen summary of the settings the container will be created with.
  echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
  echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
  echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
  echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
  echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
  echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
  # Optional lines only when the corresponding feature is active.
  case "${var_gpu:-}" in
  yes) echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}Enabled${CL}" ;;
  esac
  case "$VERBOSE" in
  yes) echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}" ;;
  esac
  echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
  echo -e " "

  # Mirror the same summary into the session log for debugging.
  log_section "CONTAINER SETTINGS - ${APP}"
  log_msg "Application: ${APP}"
  log_msg "PVE Version: ${PVEVERSION} (Kernel: ${KERNEL_VERSION})"
  log_msg "Container ID: ${CT_ID}"
  log_msg "Operating System: $var_os ($var_version)"
  log_msg "Container Type: $CT_TYPE_DESC"
  log_msg "Disk Size: ${DISK_SIZE} GB"
  log_msg "CPU Cores: ${CORE_COUNT}"
  log_msg "RAM Size: ${RAM_SIZE} MiB"
  [[ "${var_gpu:-}" == "yes" ]] && log_msg "GPU Passthrough: Enabled"
  [[ "$VERBOSE" == "yes" ]] && log_msg "Verbose Mode: Enabled"
  log_msg "Session ID: ${SESSION_ID}"
}

# ------------------------------------------------------------------------------
# install_script()
#
# - Main entrypoint for installation mode
# - Runs safety
checks (pve_check, root_check, maxkeys_check, diagnostics_check)
# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
# - Applies chosen settings and triggers container build
# ------------------------------------------------------------------------------
install_script() {
  # Environment sanity checks — each helper aborts on failure.
  pve_check
  shell_check
  root_check
  arch_check
  ssh_check
  maxkeys_check
  diagnostics_check

  # Pause the instance-pinging service while we create a container.
  if systemctl is-active -q ping-instances.service; then
    systemctl -q stop ping-instances.service
  fi

  # Next free VMID from the cluster (used as the default container ID).
  NEXTID=$(pvesh get /cluster/nextid)

  # Get timezone using timedatectl (Debian 13+ compatible)
  # Fallback to /etc/timezone for older systems
  if command -v timedatectl >/dev/null 2>&1; then
    timezone=$(timedatectl show --value --property=Timezone 2>/dev/null || echo "UTC")
  elif [ -f /etc/timezone ]; then
    timezone=$(cat /etc/timezone)
  else
    timezone="UTC"
  fi
  [[ "${timezone:-}" == Etc/* ]] && timezone="host" # pct doesn't accept Etc/* zones

  # Show APP Header
  header_info

  # --- Support CLI argument as direct preset (default, advanced, …) ---
  CHOICE="${mode:-${1:-}}"

  # If no CLI argument → show whiptail menu
  # Build menu dynamically based on available options:
  # the "App Defaults" entry only appears when an app.vars file exists,
  # which shifts the numeric tag of the "Settings" entry (4 vs 5).
  local appdefaults_option=""
  local settings_option=""
  local menu_items=(
    "1" "Default Install"
    "2" "Advanced Install"
    "3" "User Defaults"
  )

  if [ -f "$(get_app_defaults_path)" ]; then
    appdefaults_option="4"
    menu_items+=("4" "App Defaults for ${APP}")
    settings_option="5"
    menu_items+=("5" "Settings")
  else
    settings_option="4"
    menu_items+=("4" "Settings")
  fi

  # Exported so the case arms below can match the dynamic numeric tags.
  APPDEFAULTS_OPTION="$appdefaults_option"
  SETTINGS_OPTION="$settings_option"

  # Main menu loop - allows returning from Settings
  while true; do
    if [ -z "$CHOICE" ]; then
      TMP_CHOICE=$(whiptail \
        --backtitle "Proxmox VE Helper Scripts" \
        --title "Community-Scripts Options" \
        --ok-button "Select" --cancel-button "Exit Script" \
        --notags \
        --menu "\nChoose an option:\n Use TAB or Arrow keys to navigate, ENTER to select.\n" \
        20 60 9 \
        "${menu_items[@]}" \
        --default-item "1" \
        3>&1 1>&2 2>&3) || exit_script
      CHOICE="$TMP_CHOICE"
    fi

    # --- Main case ---
    # defaults_target: vars file to sync storage selection into afterwards.
    # run_maybe_offer: whether to offer saving advanced answers as app defaults.
    local defaults_target=""
    local run_maybe_offer="no"
    case "$CHOICE" in
    1 | default | DEFAULT)
      header_info
      echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
      VERBOSE="no"
      METHOD="default"
      base_settings "$VERBOSE"
      echo_default
      defaults_target="$(ensure_global_default_vars_file)"
      break
      ;;
    2 | advanced | ADVANCED)
      header_info
      echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
      METHOD="advanced"
      base_settings
      advanced_settings
      defaults_target="$(ensure_global_default_vars_file)"
      run_maybe_offer="yes"
      break
      ;;
    3 | mydefaults | MYDEFAULTS | userdefaults | USERDEFAULTS)
      default_var_settings || {
        msg_error "Failed to apply default.vars"
        exit 110
      }
      defaults_target="/usr/local/community-scripts/default.vars"
      break
      ;;
    "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
      if [ -f "$(get_app_defaults_path)" ]; then
        header_info
        echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
        METHOD="appdefaults"
        load_vars_file "$(get_app_defaults_path)" "yes" # Force override script defaults
        base_settings
        echo_default
        defaults_target="$(get_app_defaults_path)"
        break
      else
        msg_error "No App Defaults available for ${APP}"
        exit 111
      fi
      ;;
    "$SETTINGS_OPTION" | settings | SETTINGS)
      settings_menu
      # After settings menu, show main menu again
      header_info
      CHOICE=""
      ;;
    generated | GENERATED)
      header_info
      echo -e "${DEFAULT}${BOLD}${BL}Using Generated Settings on node $PVEHOST_NAME${CL}"
      VERBOSE="no"
      METHOD="generated"
      base_settings "$VERBOSE"
      echo_default
      break
      ;;
    *)
      msg_error "Invalid option: $CHOICE"
      exit 112
      ;;
    esac
  done

  if [[ -n "$defaults_target"
]]; then
    # Keep the chosen vars file in sync with the selected storage pools.
    ensure_storage_selection_for_vars_file "$defaults_target"
  fi

  if [[ "$run_maybe_offer" == "yes" ]]; then
    # Advanced mode only: offer to persist the answers as app defaults.
    maybe_offer_save_app_defaults
  fi
}

# ------------------------------------------------------------------------------
# edit_default_storage()
#
# - Ensures the global default.vars file exists, then delegates storage
#   selection/persistence to ensure_storage_selection_for_vars_file()
# ------------------------------------------------------------------------------
edit_default_storage() {
  local vf="/usr/local/community-scripts/default.vars"

  # Ensure file exists
  if [[ ! -f "$vf" ]]; then
    mkdir -p "$(dirname "$vf")"
    touch "$vf"
  fi

  # Let ensure_storage_selection_for_vars_file handle everything
  ensure_storage_selection_for_vars_file "$vf"
}

# ------------------------------------------------------------------------------
# settings_menu()
#
# - Loops over a whiptail settings menu (diagnostics toggle, edit default.vars,
#   optionally edit app.vars) until the user chooses "Back to Main Menu"
# - Menu item numbering shifts depending on whether an app.vars file exists
# ------------------------------------------------------------------------------
settings_menu() {
  while true; do
    local settings_items=(
      "1" "Manage API-Diagnostic Setting"
      "2" "Edit Default.vars"
    )
    # "Edit App.vars" only offered when the app defaults file exists;
    # the "Back" entry takes tag 3 or 4 accordingly.
    if [ -f "$(get_app_defaults_path)" ]; then
      settings_items+=("3" "Edit App.vars for ${APP}")
      settings_items+=("4" "Back to Main Menu")
    else
      settings_items+=("3" "Back to Main Menu")
    fi

    local choice
    choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
      --title "Community-Scripts SETTINGS Menu" \
      --ok-button "Select" --cancel-button "Exit Script" \
      --menu "\n\nChoose a settings option:\n\nUse Arrow keys to navigate, ENTER to select, TAB for buttons."
20 60 9 \
      "${settings_items[@]}" \
      3>&1 1>&2 2>&3) || exit_script

    case "$choice" in
    1) diagnostics_menu ;;
    2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
    3)
      if [ -f "$(get_app_defaults_path)" ]; then
        ${EDITOR:-nano} "$(get_app_defaults_path)"
      else
        # Back was selected (no app.vars available)
        return
      fi
      ;;
    4)
      # Back to main menu
      return
      ;;
    esac
  done
}

# ------------------------------------------------------------------------------
# check_container_resources()
#
# - Compares host RAM/CPU with required values
# - Warns if under-provisioned and asks user to continue or abort
# ------------------------------------------------------------------------------
check_container_resources() {
  current_ram=$(free -m | awk 'NR==2{print $2}')
  current_cpu=$(nproc)

  if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
    msg_warn "Under-provisioned: Required ${var_cpu} CPU/${var_ram}MB RAM, Current ${current_cpu} CPU/${current_ram}MB RAM"
    echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
    echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
    # NOTE(review): the paste is truncated here — the prompt-handling lines
    # after 'read -r prompt' and the comment header of check_container_storage
    # were fused into one line. TODO: restore from upstream build.func.
    read -r prompt 80% and asks user confirmation before proceeding
# ------------------------------------------------------------------------------
check_container_storage() {
  total_size=$(df /boot --output=size | tail -n 1)
  local used_size=$(df /boot --output=used | tail -n 1)
  usage=$((100 * used_size / total_size))
  if ((usage > 80)); then
    msg_warn "Storage is dangerously low (${usage}% used on /boot)"
    echo -ne "Continue anyway? "
    # NOTE(review): truncated paste — the prompt handling of
    # check_container_storage and the body of ssh_extract_keys_from_file are
    # missing; only an awk fragment survives below. TODO: restore from upstream.
    read -r prompt 0) { print substr($0, RSTART) }
  }
  '
}

# ------------------------------------------------------------------------------
# ssh_build_choices_from_files()
#
# - Builds interactive whiptail checklist of available SSH keys
# - Generates fingerprint, type and comment for each key
# - Side effects: fills globals CHOICES (whiptail triplets), COUNT (number of
#   keys) and MAPFILE (temp file mapping tag "Kn" -> raw key line)
# ------------------------------------------------------------------------------
ssh_build_choices_from_files() {
  local -a files=("$@")
  CHOICES=()
  COUNT=0
  MAPFILE="$(mktemp)"
  local id key typ fp cmt base ln=0

  for f in "${files[@]}"; do
    # Skip unreadable/non-regular entries.
    [[ -f "$f" && -r "$f" ]] || continue
    base="$(basename -- "$f")"
    case "$base" in
    known_hosts | known_hosts.* | config) continue ;;
    id_*) [[ "$f" != *.pub ]] && continue ;; # private keys: only take .pub twin
    esac

    # map every key in file
    while IFS= read -r key; do
      [[ -n "$key" ]] || continue

      typ=""
      fp=""
      cmt=""
      # Only the pure key part (without options) is already included in 'key'.
      read -r _typ _b64 _cmt <<<"$key"
      typ="${_typ:-key}"
      cmt="${_cmt:-}"
      # Fingerprint via ssh-keygen (if available)
      if command -v ssh-keygen >/dev/null 2>&1; then
        fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
      fi
      # Label shorten
      [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."

      ln=$((ln + 1))
      COUNT=$((COUNT + 1))
      id="K${COUNT}"
      echo "${id}|${key}" >>"$MAPFILE"
      CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
    done < <(ssh_extract_keys_from_file "$f")
  done
}

# ------------------------------------------------------------------------------
# ssh_discover_default_files()
#
# - Scans standard paths for SSH keys
# - Includes ~/.ssh/*.pub, /etc/ssh/authorized_keys, etc.
# ------------------------------------------------------------------------------
ssh_discover_default_files() {
  local -a cand=()
  # nullglob: unmatched globs vanish instead of surviving literally.
  shopt -s nullglob
  cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
  cand+=(/root/.ssh/*.pub)
  cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
  shopt -u nullglob
  # NUL-delimited so callers can split safely on arbitrary filenames.
  printf '%s\0' "${cand[@]}"
}

# ------------------------------------------------------------------------------
# configure_ssh_settings()
#
# - Interactive SSH provisioning: choose keys from detected files, paste one
#   manually, scan a custom folder/glob, or skip keys entirely
# - Writes the selected public keys (deduplicated) into $SSH_KEYS_FILE (global)
# - Finally asks whether to enable root SSH access and sets SSH=yes/no (global)
# - $1 (optional): "Step X/Y" text appended to the whiptail backtitle
# ------------------------------------------------------------------------------
configure_ssh_settings() {
  local step_info="${1:-}"
  local backtitle="Proxmox VE Helper Scripts"
  [[ -n "$step_info" ]] && backtitle="Proxmox VE Helper Scripts [${step_info}]"

  SSH_KEYS_FILE="$(mktemp)"
  : >"$SSH_KEYS_FILE"

  # NOTE(review): IFS=$'\0' is an empty IFS in bash (NUL cannot be stored in a
  # variable); combined with 'read -d '' -a' this likely does not split the
  # NUL-delimited list as intended — TODO: verify against 'mapfile -d ""'.
  IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
  ssh_build_choices_from_files "${_def_files[@]}"
  local default_key_count="$COUNT"

  local ssh_key_mode
  if [[ "$default_key_count" -gt 0 ]]; then
    ssh_key_mode=$(whiptail --backtitle "$backtitle" --title "SSH KEY SOURCE" --menu \
      "Provision SSH keys for root:" 14 72 4 \
      "found" "Select from detected keys (${default_key_count})" \
      "manual" "Paste a single public key" \
      "folder" "Scan another folder (path or glob)" \
      "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
  else
    ssh_key_mode=$(whiptail --backtitle "$backtitle" --title "SSH KEY SOURCE" --menu \
      "No host keys detected; choose manual/none:" 12 72 2 \
      "manual" "Paste a single public key" \
      "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
  fi

  case "$ssh_key_mode" in
  found)
    # Checklist of detected keys; map each selected tag back to its raw
    # key line via the MAPFILE written by ssh_build_choices_from_files().
    local selection
    selection=$(whiptail --backtitle "$backtitle" --title "SELECT HOST KEYS" \
      --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
    for tag in $selection; do
      tag="${tag%\"}"
      tag="${tag#\"}"
      local line
      line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
      [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
    done
    ;;
  manual)
    SSH_AUTHORIZED_KEY="$(whiptail --backtitle "$backtitle" \
      --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
    [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
    ;;
  folder)
    # User-supplied path or glob; re-scan and present a fresh checklist.
    local glob_path
    glob_path=$(whiptail --backtitle "$backtitle" \
      --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
    if [[ -n "$glob_path" ]]; then
      shopt -s nullglob
      read -r -a _scan_files <<<"$glob_path"
      shopt -u nullglob
      if [[ "${#_scan_files[@]}" -gt 0 ]]; then
        ssh_build_choices_from_files "${_scan_files[@]}"
        if [[ "$COUNT" -gt 0 ]]; then
          local folder_selection
          folder_selection=$(whiptail --backtitle "$backtitle" --title "SELECT FOLDER KEYS" \
            --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
          for tag in $folder_selection; do
            tag="${tag%\"}"
            tag="${tag#\"}"
            local line
            line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
            [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
          done
        else
          whiptail --backtitle "$backtitle" --msgbox "No keys found in: $glob_path" 8 60
        fi
      else
        whiptail --backtitle "$backtitle" --msgbox "Path/glob returned no files." 8 60
      fi
    fi
    ;;
  none)
    :
    ;;
  esac

  # Deduplicate collected keys and ensure a trailing newline.
  if [[ -s "$SSH_KEYS_FILE" ]]; then
    sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
    printf '\n' >>"$SSH_KEYS_FILE"
  fi

  # Always show SSH access dialog - user should be able to enable SSH even without keys
  if (whiptail --backtitle "$backtitle" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?"
10 58); then
    SSH="yes"
  else
    SSH="no"
  fi
}

# ------------------------------------------------------------------------------
# msg_menu()
#
# - Displays a numbered menu for update_script() functions
# - In silent mode (PHS_SILENT=1): auto-selects the default option
# - In interactive mode: shows menu via read with 10s timeout + default fallback
# - Usage: CHOICE=$(msg_menu "Title" "tag1" "Description 1" "tag2" "Desc 2" ...)
# - The first item is always the default
# - Returns the selected tag to stdout
# - If no valid selection or timeout, returns the default (first) tag
# ------------------------------------------------------------------------------
msg_menu() {
  local title="$1"
  shift

  # Parse items into parallel arrays: tags[] and descriptions[]
  local -a tags=()
  local -a descs=()
  while [[ $# -ge 2 ]]; do
    tags+=("$1")
    descs+=("$2")
    shift 2
  done

  local default_tag="${tags[0]}"

  # Silent mode: return default immediately
  if [[ -n "${PHS_SILENT+x}" ]] && [[ "${PHS_SILENT}" == "1" ]]; then
    echo "$default_tag"
    return 0
  fi

  # Display menu to /dev/tty so it doesn't get captured by command substitution
  {
    echo ""
    msg_custom "📋" "${BL}" "${title}"
    echo ""
    for i in "${!tags[@]}"; do
      local marker="  "
      [[ $i -eq 0 ]] && marker="* "
      printf "${TAB3}${marker}%s) %s\n" "${tags[$i]}" "${descs[$i]}"
    done
    echo ""
  } >/dev/tty

  local selection=""
  # FIX: read must take its input FROM the terminal; the '<' redirection had
  # been lost, which made '/dev/tty' an (invalid) second variable name for read.
  read -r -t 10 -p "${TAB3}Select [default=${default_tag}, timeout 10s]: " selection </dev/tty || true

  # Validate selection against the known tags; fall back to default otherwise.
  if [[ -n "$selection" ]]; then
    for tag in "${tags[@]}"; do
      if [[ "$selection" == "$tag" ]]; then
        echo "$selection"
        return 0
      fi
    done
    msg_warn "Invalid selection '${selection}' - using default: ${default_tag}"
  fi

  echo "$default_tag"
  return 0
}

# ------------------------------------------------------------------------------
# start()
#
# - Entry point of script
# - On Proxmox host:
# ------------------------------------------------------------------------------
# start()
#
# - Entry point of script
# - On Proxmox host: calls install_script
# - In silent mode: runs update_script with automatic cleanup
# - Otherwise: shows update/setting menu and runs update_script with cleanup
# ------------------------------------------------------------------------------
start() {
  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func)

  # Running on the Proxmox host itself → install path only.
  if command -v pveversion >/dev/null 2>&1; then
    install_script || return 0
    return 0
  fi

  # Quoted [[ -n ... ]] (was unquoted `[ ! -z ${PHS_SILENT+x} ]`), matching the
  # silent-mode check used elsewhere in this file (e.g. msg_menu).
  if [[ -n "${PHS_SILENT+x}" ]] && [[ "${PHS_SILENT}" == "1" ]]; then
    VERBOSE="no"
    set_std_mode
  else
    CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
      "Support/Update functions for ${APP} LXC.  Choose an option:" \
      12 60 3 \
      "1" "YES (Silent Mode)" \
      "2" "YES (Verbose Mode)" \
      "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)

    case "$CHOICE" in
      1)
        VERBOSE="no"
        set_std_mode
        ;;
      2)
        VERBOSE="yes"
        set_std_mode
        ;;
      3)
        clear
        exit_script
        exit 0
        ;;
    esac
  fi

  # Common update sequence (was duplicated in both branches above; option 3
  # exits before reaching this point, so hoisting it is behavior-preserving).
  ensure_profile_loaded
  get_lxc_ip
  update_script
  update_motd_ip
  cleanup_lxc
}
+# - Injects SSH keys if configured +# - Executes -install.sh inside container +# - Posts installation telemetry to API if diagnostics enabled +# ------------------------------------------------------------------------------ +build_container() { + # if [ "$VERBOSE" == "yes" ]; then set -x; fi + + NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}" + + # MAC + if [[ -n "$MAC" ]]; then + case "$MAC" in + ,hwaddr=*) NET_STRING+="$MAC" ;; + *) NET_STRING+=",hwaddr=$MAC" ;; + esac + fi + + # IP (always required, default dhcp) + NET_STRING+=",ip=${NET:-dhcp}" + + # Gateway + if [[ -n "$GATE" ]]; then + case "$GATE" in + ,gw=*) NET_STRING+="$GATE" ;; + *) NET_STRING+=",gw=$GATE" ;; + esac + fi + + # VLAN + if [[ -n "$VLAN" ]]; then + case "$VLAN" in + ,tag=*) NET_STRING+="$VLAN" ;; + *) NET_STRING+=",tag=$VLAN" ;; + esac + fi + + # MTU + if [[ -n "$MTU" ]]; then + case "$MTU" in + ,mtu=*) NET_STRING+="$MTU" ;; + *) NET_STRING+=",mtu=$MTU" ;; + esac + fi + + # IPv6 Handling + case "$IPV6_METHOD" in + auto) NET_STRING="$NET_STRING,ip6=auto" ;; + dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;; + static) + NET_STRING="$NET_STRING,ip6=$IPV6_ADDR" + [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE" + ;; + none) ;; + esac + + # Build FEATURES string based on container type and user choices + FEATURES="" + + # Nesting support (user configurable, default enabled) + if [ "${ENABLE_NESTING:-1}" == "1" ]; then + FEATURES="nesting=1" + fi + + # Keyctl for unprivileged containers (needed for Docker) + if [ "$CT_TYPE" == "1" ]; then + [ -n "$FEATURES" ] && FEATURES="$FEATURES," + FEATURES="${FEATURES}keyctl=1" + fi + + if [ "$ENABLE_FUSE" == "yes" ]; then + [ -n "$FEATURES" ] && FEATURES="$FEATURES," + FEATURES="${FEATURES}fuse=1" + fi + + # Build PCT_OPTIONS as string for export + TEMP_DIR=$(mktemp -d) + pushd "$TEMP_DIR" >/dev/null + local _func_url + if [ "$var_os" == "alpine" ]; then + 
_func_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/alpine-install.func" + else + _func_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/install.func" + fi + export FUNCTIONS_FILE_PATH="$(curl -fsSL "$_func_url")" + if [[ -z "$FUNCTIONS_FILE_PATH" || ${#FUNCTIONS_FILE_PATH} -lt 100 ]]; then + msg_error "Failed to download install functions from: $_func_url" + exit 115 + fi + + # Core exports for install.func + export DIAGNOSTICS="$DIAGNOSTICS" + export RANDOM_UUID="$RANDOM_UUID" + export EXECUTION_ID="$EXECUTION_ID" + export SESSION_ID="$SESSION_ID" + export CACHER="$APT_CACHER" + export CACHER_IP="$APT_CACHER_IP" + export tz="$timezone" + export APPLICATION="$APP" + export app="$NSAPP" + export PASSWORD="$PW" + export VERBOSE="$VERBOSE" + export SSH_ROOT="${SSH}" + export SSH_AUTHORIZED_KEY + export CTID="$CT_ID" + export CTTYPE="$CT_TYPE" + export ENABLE_FUSE="$ENABLE_FUSE" + export ENABLE_TUN="$ENABLE_TUN" + export PCT_OSTYPE="$var_os" + export PCT_OSVERSION="$var_version" + export PCT_DISK_SIZE="$DISK_SIZE" + export IPV6_METHOD="$IPV6_METHOD" + export ENABLE_GPU="$ENABLE_GPU" + + # DEV_MODE exports (optional, for debugging) + export BUILD_LOG="$BUILD_LOG" + export INSTALL_LOG="/root/.install-${SESSION_ID}.log" + + # Keep host-side logging on BUILD_LOG (not exported — invisible to container) + # Without this, get_active_logfile() would return INSTALL_LOG (a container path) + # and all host msg_info/msg_ok/msg_error would write to /root/.install-SESSION.log + # on the HOST instead of BUILD_LOG, causing incomplete telemetry logs. 
+ _HOST_LOGFILE="$BUILD_LOG" + + export dev_mode="${dev_mode:-}" + export DEV_MODE_MOTD="${DEV_MODE_MOTD:-false}" + export DEV_MODE_KEEP="${DEV_MODE_KEEP:-false}" + export DEV_MODE_TRACE="${DEV_MODE_TRACE:-false}" + export DEV_MODE_PAUSE="${DEV_MODE_PAUSE:-false}" + export DEV_MODE_BREAKPOINT="${DEV_MODE_BREAKPOINT:-false}" + export DEV_MODE_LOGS="${DEV_MODE_LOGS:-false}" + export DEV_MODE_DRYRUN="${DEV_MODE_DRYRUN:-false}" + + # Build PCT_OPTIONS as multi-line string + PCT_OPTIONS_STRING=" -hostname $HN" + + # Only add -tags if TAGS is not empty + if [ -n "$TAGS" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + -tags $TAGS" + fi + + # Only add -features if FEATURES is not empty + if [ -n "$FEATURES" ]; then + PCT_OPTIONS_STRING=" -features $FEATURES +$PCT_OPTIONS_STRING" + fi + + # Add searchdomain if specified + if [ -n "$SD" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + $SD" + fi + + # Add nameserver if specified + if [ -n "$NS" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + $NS" + fi + + # Network configuration + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + $NET_STRING + -onboot 1 + -cores $CORE_COUNT + -memory $RAM_SIZE + -unprivileged $CT_TYPE" + + # Protection flag (if var_protection was set) + if [ "${PROTECT_CT:-}" == "1" ] || [ "${PROTECT_CT:-}" == "yes" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + -protection 1" + fi + + # Timezone (map Etc/* to "host" as pct doesn't accept them) + if [ -n "${CT_TIMEZONE:-}" ]; then + local _pct_timezone="$CT_TIMEZONE" + [[ "$_pct_timezone" == Etc/* ]] && _pct_timezone="host" + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + -timezone $_pct_timezone" + fi + + # Password (already formatted) + if [ -n "$PW" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + $PW" + fi + + # Export as string (this works, unlike arrays!) 
+ export PCT_OPTIONS="$PCT_OPTIONS_STRING" + export TEMPLATE_STORAGE="${var_template_storage:-}" + export CONTAINER_STORAGE="${var_container_storage:-}" + + # Validate storage space only if CONTAINER_STORAGE is already set + # (Storage selection happens in create_lxc_container for some modes) + if [[ -n "$CONTAINER_STORAGE" ]]; then + msg_info "Validating storage space" + if ! validate_storage_space "$CONTAINER_STORAGE" "$DISK_SIZE" "no"; then + local free_space + free_space=$(pvesm status 2>/dev/null | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') + local free_fmt + free_fmt=$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$free_space" 2>/dev/null || echo "${free_space}KB") + msg_error "Not enough space on '$CONTAINER_STORAGE'. Required: ${DISK_SIZE}GB, Available: ${free_fmt}" + exit 214 + fi + msg_ok "Storage space validated" + fi + + create_lxc_container || exit $? + + # Transition to 'configuring' — container created, now setting up OS/userland + post_progress_to_api "configuring" + + LXC_CONFIG="/etc/pve/lxc/${CTID}.conf" + + # ============================================================================ + # GPU/USB PASSTHROUGH CONFIGURATION + # ============================================================================ + + # Check if GPU passthrough is enabled + # Returns true only if var_gpu is explicitly set to "yes" + # Can be set via: + # - Environment variable: var_gpu=yes bash -c "..." 
+ # - CT script default: var_gpu="${var_gpu:-no}" + # - Advanced settings wizard + # - App defaults file: /usr/local/community-scripts/defaults/.vars + is_gpu_app() { + [[ "${var_gpu:-no}" == "yes" ]] && return 0 + return 1 + } + + # Detect all available GPU devices + detect_gpu_devices() { + INTEL_DEVICES=() + AMD_DEVICES=() + NVIDIA_DEVICES=() + + # Store PCI info to avoid multiple calls + # grep returns exit 1 when no match — use || true to prevent ERR trap + local pci_vga_info + pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D" || true) + + # No GPU-related PCI devices at all? Skip silently. + if [[ -z "$pci_vga_info" ]]; then + msg_debug "No VGA/Display/3D PCI devices found" + return 0 + fi + + # Check for Intel GPU - look for Intel vendor ID [8086] + if grep -q "\[8086:" <<<"$pci_vga_info"; then + msg_custom "🎮" "${BL}" "Detected Intel GPU" + if [[ -d /dev/dri ]]; then + for d in /dev/dri/renderD* /dev/dri/card*; do + [[ -e "$d" ]] && INTEL_DEVICES+=("$d") + done + fi + fi + + # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD) + if grep -qE "\[1002:|\[1022:" <<<"$pci_vga_info"; then + msg_custom "🎮" "${RD}" "Detected AMD GPU" + if [[ -d /dev/dri ]]; then + # Only add if not already claimed by Intel + if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then + for d in /dev/dri/renderD* /dev/dri/card*; do + [[ -e "$d" ]] && AMD_DEVICES+=("$d") + done + fi + fi + fi + + # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de] + if grep -q "\[10de:" <<<"$pci_vga_info"; then + msg_custom "🎮" "${GN}" "Detected NVIDIA GPU" + + # Simple passthrough - just bind /dev/nvidia* devices if they exist + # Only include character devices (-c), skip directories like /dev/nvidia-caps + for d in /dev/nvidia*; do + [[ -c "$d" ]] && NVIDIA_DEVICES+=("$d") + done + # Also check for devices inside /dev/nvidia-caps/ directory + if [[ -d /dev/nvidia-caps ]]; then + for d in /dev/nvidia-caps/*; do + [[ -c "$d" ]] && NVIDIA_DEVICES+=("$d") + done + fi + + if 
[[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then + msg_custom "🎮" "${GN}" "Found ${#NVIDIA_DEVICES[@]} NVIDIA device(s) for passthrough" + else + msg_warn "NVIDIA GPU detected via PCI but no /dev/nvidia* devices found" + msg_custom "ℹ️" "${YW}" "Skipping NVIDIA passthrough (host drivers may not be loaded)" + fi + fi + + # Debug output + msg_debug "Intel devices: ${INTEL_DEVICES[*]}" + msg_debug "AMD devices: ${AMD_DEVICES[*]}" + msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}" + } + + # Configure USB passthrough for privileged containers + configure_usb_passthrough() { + if [[ "$CT_TYPE" != "0" ]]; then + return 0 + fi + + msg_info "Configuring automatic USB passthrough (privileged container)" + cat <>"$LXC_CONFIG" +# Automatic USB passthrough (privileged container) +lxc.cgroup2.devices.allow: a +lxc.cap.drop: +lxc.cgroup2.devices.allow: c 188:* rwm +lxc.cgroup2.devices.allow: c 189:* rwm +lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir +lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file +lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file +lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file +lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file +EOF + msg_ok "USB passthrough configured" + } + + # Configure GPU passthrough + configure_gpu_passthrough() { + # Skip if: + # GPU passthrough is enabled when var_gpu="yes": + # - Set via environment variable: var_gpu=yes bash -c "..." + # - Set in CT script: var_gpu="${var_gpu:-no}" + # - Enabled in advanced_settings wizard + # - Configured in app defaults file + if ! 
is_gpu_app "$APP"; then + return 0 + fi + + detect_gpu_devices + + # Count available GPU types + local gpu_count=0 + local available_gpus=() + + if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then + available_gpus+=("INTEL") + gpu_count=$((gpu_count + 1)) + fi + + if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then + available_gpus+=("AMD") + gpu_count=$((gpu_count + 1)) + fi + + if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then + available_gpus+=("NVIDIA") + gpu_count=$((gpu_count + 1)) + fi + + if [[ $gpu_count -eq 0 ]]; then + msg_custom "ℹ️" "${YW}" "No GPU devices found for passthrough" + return 0 + fi + + local selected_gpu="" + + if [[ $gpu_count -eq 1 ]]; then + # Automatic selection for single GPU + selected_gpu="${available_gpus[0]}" + msg_ok "Automatically configuring ${selected_gpu} GPU passthrough" + else + # Multiple GPUs - ask user + echo -e "\n${INFO} Multiple GPU types detected:" + for gpu in "${available_gpus[@]}"; do + echo " - $gpu" + done + read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu >"$LXC_CONFIG" + dev_index=$((dev_index + 1)) + done + + export GPU_TYPE="$selected_gpu" + msg_ok "${selected_gpu} GPU passthrough configured (${#devices[@]} devices)" + ;; + + NVIDIA) + if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then + msg_warn "No NVIDIA devices available for passthrough" + return 0 + fi + + # Use pct set for NVIDIA devices + local dev_index=0 + for dev in "${NVIDIA_DEVICES[@]}"; do + echo "dev${dev_index}: ${dev},gid=44" >>"$LXC_CONFIG" + dev_index=$((dev_index + 1)) + done + + export GPU_TYPE="NVIDIA" + msg_ok "NVIDIA GPU passthrough configured (${#NVIDIA_DEVICES[@]} devices) - install drivers in container if needed" + ;; + esac + } + + # Additional device passthrough + configure_additional_devices() { + # TUN device passthrough + if [ "$ENABLE_TUN" == "yes" ]; then + cat <>"$LXC_CONFIG" +lxc.cgroup2.devices.allow: c 10:200 rwm +lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file +EOF + fi + + # Coral TPU passthrough + if [[ -e 
/dev/apex_0 ]]; then + msg_custom "🔌" "${BL}" "Detected Coral TPU - configuring passthrough" + echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG" + fi + } + + # Execute pre-start configurations + configure_usb_passthrough + configure_gpu_passthrough + configure_additional_devices + + # Increase disk size for AMD ROCm runtime (~4GB extra needed) + if [[ "${GPU_TYPE:-}" == "AMD" ]]; then + local rocm_extra=4 + local new_disk_size=$((PCT_DISK_SIZE + rocm_extra)) + if pct resize "$CTID" rootfs "${new_disk_size}G" >/dev/null 2>&1; then + msg_ok "Disk resized ${PCT_DISK_SIZE}GB → ${new_disk_size}GB for ROCm" + else + msg_warn "Failed to resize disk for ROCm — installation may fail if space is insufficient" + fi + fi + + # ============================================================================ + # START CONTAINER AND INSTALL USERLAND + # ============================================================================ + + msg_info "Starting LXC Container" + pct start "$CTID" + + # Wait for container to be running + for i in {1..10}; do + if pct status "$CTID" | grep -q "status: running"; then + msg_ok "Started LXC Container" + break + fi + sleep 1 + if [ "$i" -eq 10 ]; then + local ct_status + ct_status=$(pct status "$CTID" 2>/dev/null || echo "unknown") + msg_error "LXC Container did not reach running state (status: ${ct_status})" + exit 117 + fi + done + + # Wait for network (skip for Alpine initially) + if [ "$var_os" != "alpine" ]; then + msg_info "Waiting for network in LXC container" + + # Wait for IP assignment (IPv4 or IPv6) + local ip_in_lxc="" + for i in {1..20}; do + # Try IPv4 first + ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) + # Fallback to IPv6 if IPv4 not available + if [ -z "$ip_in_lxc" ]; then + ip_in_lxc=$(pct exec "$CTID" -- ip -6 addr show dev eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1) + fi + [ -n 
"$ip_in_lxc" ] && break + sleep 1 + done + + if [ -z "$ip_in_lxc" ]; then + msg_error "No IP assigned to CT $CTID after 20s" + msg_custom "🔧" "${YW}" "Troubleshooting:" + echo " • Verify bridge ${BRG} exists and has connectivity" + echo " • Check if DHCP server is reachable (if using DHCP)" + echo " • Verify static IP configuration (if using static IP)" + echo " • Check Proxmox firewall rules" + echo " • If using Tailscale: Disable MagicDNS temporarily" + exit 118 + fi + + # Verify basic connectivity (ping test) + local ping_success=false + for retry in {1..3}; do + if pct exec "$CTID" -- ping -c 1 -W 2 1.1.1.1 &>/dev/null || + pct exec "$CTID" -- ping -c 1 -W 2 8.8.8.8 &>/dev/null || + pct exec "$CTID" -- ping6 -c 1 -W 2 2606:4700:4700::1111 &>/dev/null; then + ping_success=true + break + fi + sleep 2 + done + + if [ "$ping_success" = false ]; then + msg_warn "Network configured (IP: $ip_in_lxc) but connectivity test failed - installation will continue" + else + msg_ok "Network in LXC is reachable (ping)" + fi + fi + # Function to get correct GID inside container + get_container_gid() { + local group="$1" + local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3) + echo "${gid:-44}" # Default to 44 if not found + } + + fix_gpu_gids + + # Fix Debian 13 LXC template bug where / is owned by nobody:nogroup + # This must be done from the host as unprivileged containers cannot chown / + local rootfs + rootfs=$(pct config "$CTID" | grep -E '^rootfs:' | sed 's/rootfs: //' | cut -d',' -f1) + if [[ -n "$rootfs" ]]; then + local mount_point="/var/lib/lxc/${CTID}/rootfs" + if [[ -d "$mount_point" ]] && [[ "$(stat -c '%U' "$mount_point")" != "root" ]]; then + chown root:root "$mount_point" 2>/dev/null || true + fi + fi + + # Continue with standard container setup + msg_info "Customizing LXC Container" + + # # Install GPU userland if configured + # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then + # install_gpu_userland "VAAPI" + # fi + + # if [[ 
"${ENABLE_NVIDIA:-0}" == "1" ]]; then + # install_gpu_userland "NVIDIA" + # fi + + # Disable error trap for entire customization & install phase. + # All errors are handled explicitly — recovery menu shown on failure. + # Without this, customization errors (e.g. container stopped during base package + # install) would trigger error_handler() with a simple "Remove broken container?" + # prompt instead of the full recovery menu with retry/repair options. + set +Eeuo pipefail + trap - ERR + + local install_exit_code=0 + + # Continue with standard container setup + if [ "$var_os" == "alpine" ]; then + sleep 3 + pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories +http://dl-cdn.alpinelinux.org/alpine/latest-stable/main +http://dl-cdn.alpinelinux.org/alpine/latest-stable/community +EOF' + pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq" >>"$BUILD_LOG" 2>&1 || { + msg_error "Failed to install base packages in Alpine container" + install_exit_code=1 + } + else + sleep 3 + LANG=${LANG:-en_US.UTF-8} + pct exec "$CTID" -- bash -c "sed -i \"/$LANG/ s/^# //\" /etc/locale.gen" + pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ + echo LANG=\$locale_line >/etc/default/locale && \ + locale-gen >/dev/null && \ + export LANG=\$locale_line" + + if [[ -z "${tz:-}" ]]; then + tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "UTC") + fi + [[ "${tz:-}" == Etc/* ]] && tz="UTC" # Normalize Etc/* to UTC for container setup + + if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then + # Set timezone using symlink (Debian 13+ compatible) + # Create /etc/timezone for backwards compatibility with older scripts + pct exec "$CTID" -- bash -c "tz='$tz'; ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime && echo \"\$tz\" >/etc/timezone || true" + else + msg_warn "Skipping timezone setup – zone '$tz' not found in container" + fi + + pct exec "$CTID" -- 
bash -c "apt-get update 2>&1 && apt-get install -y sudo curl mc gnupg2 jq 2>&1" >>"$BUILD_LOG" 2>&1 || { + msg_error "apt-get base packages installation failed" + install_exit_code=1 + } + fi + + # Only continue with installation if customization succeeded + if [[ $install_exit_code -eq 0 ]]; then + msg_ok "Customized LXC Container" + + # Optional DNS override for retry scenarios (inside LXC, never on host) + if [[ "${DNS_RETRY_OVERRIDE:-false}" == "true" ]]; then + msg_info "Applying DNS retry override in LXC (8.8.8.8, 1.1.1.1)" + pct exec "$CTID" -- bash -c "printf 'nameserver 8.8.8.8\nnameserver 1.1.1.1\n' >/etc/resolv.conf" >/dev/null 2>&1 || true + msg_ok "DNS override applied in LXC" + fi + + # Install SSH keys + install_ssh_keys_into_ct + + # Start timer for duration tracking + start_install_timer + + # Run application installer + # Error handling already disabled above (before customization phase) + + # Signal handlers use this flag to stop the container on abort (SIGHUP/SIGINT/SIGTERM) + # Without this, SSH disconnects leave the container running as an orphan process + # that sends "configuring" status AFTER the host already reported "failed" + export CONTAINER_INSTALLING=true + + lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" + local lxc_exit=$? 
+ + unset CONTAINER_INSTALLING + + # Keep error handling DISABLED during failure detection and recovery + # Re-enabling it here would cause any pct exec/pull failure to trigger + # error_handler() on the host, bypassing the recovery menu entirely + + # Check for error flag file in container (more reliable than lxc-attach exit code) + if [[ -n "${SESSION_ID:-}" ]]; then + local error_flag="/root/.install-${SESSION_ID}.failed" + if pct exec "$CTID" -- test -f "$error_flag" 2>/dev/null; then + install_exit_code=$(pct exec "$CTID" -- cat "$error_flag" 2>/dev/null || echo "1") + pct exec "$CTID" -- rm -f "$error_flag" 2>/dev/null || true + fi + fi + + # Fallback to lxc-attach exit code if no flag file + if [[ $install_exit_code -eq 0 && ${lxc_exit:-0} -ne 0 ]]; then + install_exit_code=${lxc_exit:-0} + fi + fi # end: if [[ $install_exit_code -eq 0 ]] (customization succeeded) + + # Installation or customization failed? + if [[ $install_exit_code -ne 0 ]]; then + # Prevent job-control signals from suspending the script during recovery. + # In non-interactive shells (bash -c), background processes (spinner) can + # trigger terminal-related signals that stop the entire process group. 
+ # TSTP = Ctrl+Z, TTIN = bg read from tty, TTOU = bg write to tty (tostop) + trap '' TSTP TTIN TTOU + + msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})" + + # Copy install log from container BEFORE API call so get_error_text() can read it + local build_log_copied=false + local install_log_copied=false + local combined_log="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log" + + if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then + # Create combined log with header + { + echo "================================================================================" + echo "COMBINED INSTALLATION LOG - ${APP:-LXC}" + echo "Container ID: ${CTID}" + echo "Session ID: ${SESSION_ID}" + echo "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')" + echo "================================================================================" + echo "" + } >"$combined_log" + + # Append BUILD_LOG (host-side creation log) if it exists + if [[ -f "${BUILD_LOG}" ]]; then + { + echo "================================================================================" + echo "PHASE 1: CONTAINER CREATION (Host)" + echo "================================================================================" + cat "${BUILD_LOG}" + echo "" + } >>"$combined_log" + build_log_copied=true + fi + + # Copy and append INSTALL_LOG from container + local temp_install_log="/tmp/.install-temp-${SESSION_ID}.log" + if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_install_log" 2>/dev/null; then + { + echo "================================================================================" + echo "PHASE 2: APPLICATION INSTALLATION (Container)" + echo "================================================================================" + cat "$temp_install_log" + echo "" + } >>"$combined_log" + rm -f "$temp_install_log" + install_log_copied=true + # Point INSTALL_LOG to combined log so get_full_log() finds it + INSTALL_LOG="$combined_log" + fi + fi + + # Report failure to telemetry API (now with 
log available on host) + # NOTE: Do NOT use msg_info/spinner here — the background spinner process + # causes SIGTSTP in non-interactive shells (bash -c "$(curl ...)"), which + # stops the entire process group and prevents the recovery dialog from appearing. + $STD echo -e "${TAB}⏳ Reporting failure to telemetry..." + post_update_to_api "failed" "$install_exit_code" + $STD echo -e "${TAB}${CM:-✔} Failure reported" + + # Defense-in-depth: Ensure error handling stays disabled during recovery. + # Some functions (e.g. silent/$STD) unconditionally re-enable set -Eeuo pipefail + # and trap 'error_handler' ERR. If any code path above called such a function, + # the grep/sed pipelines below would trigger error_handler on non-match (exit 1). + set +Eeuo pipefail + trap - ERR + + # Show combined log location + if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then + msg_custom "📋" "${YW}" "Installation log: ${combined_log}" + fi + + # Dev mode: Keep container or open breakpoint shell + if [[ "${DEV_MODE_KEEP:-false}" == "true" ]]; then + msg_dev "Keep mode active - container ${CTID} preserved" + return 0 + elif [[ "${DEV_MODE_BREAKPOINT:-false}" == "true" ]]; then + msg_dev "Breakpoint mode - opening shell in container ${CTID}" + echo -e "${YW}Type 'exit' to return to host${CL}" + pct enter "$CTID" + echo "" + echo -en "${YW}Container ${CTID} still running. Remove now? 
(y/N): ${CL}" + if read -r response /dev/null || true + pct destroy "$CTID" &>/dev/null || true + msg_ok "Container ${CTID} removed" + else + msg_dev "Container ${CTID} kept for debugging" + fi + exit $install_exit_code + fi + + # Prompt user for cleanup with 60s timeout + echo "" + + # Detect error type for smart recovery options + local is_oom=false + local is_network_issue=false + local is_apt_issue=false + local is_cmd_not_found=false + local is_disk_full=false + local error_explanation="" + if declare -f explain_exit_code >/dev/null 2>&1; then + error_explanation="$(explain_exit_code "$install_exit_code")" + fi + + # OOM detection: exit codes 134 (SIGABRT/heap), 137 (SIGKILL/OOM), 243 (Node.js heap) + if [[ $install_exit_code -eq 134 || $install_exit_code -eq 137 || $install_exit_code -eq 243 ]]; then + is_oom=true + fi + + # APT/DPKG detection: exit codes 100-102 (APT), 255 (DPKG with log evidence) + case "$install_exit_code" in + 100 | 101 | 102) is_apt_issue=true ;; + 255) + if [[ -f "$combined_log" ]] && grep -qiE 'dpkg|apt-get|apt\.conf|broken packages|unmet dependencies|E: Sub-process|E: Failed' "$combined_log"; then + is_apt_issue=true + fi + ;; + esac + + # Disk full / ENOSPC detection: errno -28 (ENOSPC), exit 228 (custom handler), exit 23 (curl write error) + if [[ $install_exit_code -eq 228 || $install_exit_code -eq 23 ]]; then + is_disk_full=true + fi + if [[ -f "$combined_log" ]] && grep -qiE 'ENOSPC|no space left on device|No space left on device|Disk quota exceeded|errno -28' "$combined_log"; then + is_disk_full=true + fi + + # Command not found detection + if [[ $install_exit_code -eq 127 ]]; then + is_cmd_not_found=true + fi + + # Network-related detection (curl/apt/git fetch failures and transient network issues) + case "$install_exit_code" in + 6 | 7 | 22 | 28 | 35 | 52 | 56 | 57 | 75 | 78) is_network_issue=true ;; + 100) + # APT can fail due to network (Failed to fetch) + if [[ -f "$combined_log" ]] && grep -qiE 'Failed to fetch|Could not 
resolve|Connection failed|Network is unreachable|Temporary failure resolving' "$combined_log"; then + is_network_issue=true + fi + ;; + 128) + if [[ -f "$combined_log" ]] && grep -qiE 'RPC failed|early EOF|fetch-pack|HTTP/2 stream|Could not resolve host|Temporary failure resolving|Failed to fetch|Connection reset|Network is unreachable' "$combined_log"; then + is_network_issue=true + fi + ;; + esac + + # Exit 1 subclassification: analyze logs to identify actual root cause + # Many exit 1 errors are actually APT, OOM, network, or command-not-found issues + if [[ $install_exit_code -eq 1 && -f "$combined_log" ]]; then + if grep -qiE 'E: Unable to|E: Package|E: Failed to fetch|dpkg.*error|broken packages|unmet dependencies|dpkg --configure -a' "$combined_log"; then + is_apt_issue=true + fi + if grep -qiE 'Cannot allocate memory|Out of memory|oom-killer|Killed process|JavaScript heap' "$combined_log"; then + is_oom=true + fi + if grep -qiE 'Could not resolve|DNS|Connection refused|Network is unreachable|No route to host|Temporary failure resolving|Failed to fetch' "$combined_log"; then + is_network_issue=true + fi + if grep -qiE ': command not found|No such file or directory.*/s?bin/' "$combined_log"; then + is_cmd_not_found=true + fi + if grep -qiE 'ENOSPC|no space left on device|Disk quota exceeded|errno -28' "$combined_log"; then + is_disk_full=true + fi + fi + + # Show error explanation if available + if [[ -n "$error_explanation" ]]; then + echo -e "${TAB}${RD}Error: ${error_explanation}${CL}" + echo "" + fi + + # Show specific hints for known error types + if [[ $install_exit_code -eq 10 ]]; then + echo -e "${TAB}${INFO} This error usually means the container needs ${GN}privileged${CL} mode or Docker/nesting support." 
+ echo -e "${TAB}${INFO} Recreate with: Advanced Install → Container Type: ${GN}Privileged${CL}" + echo "" + fi + + if [[ $install_exit_code -eq 125 || $install_exit_code -eq 126 ]]; then + echo -e "${TAB}${INFO} The command exists but cannot be executed. This may be a ${GN}permission${CL} issue." + echo -e "${TAB}${INFO} If using Docker, ensure the container is ${GN}privileged${CL} or has correct permissions." + echo "" + fi + + if [[ "$is_disk_full" == true ]]; then + echo -e "${TAB}${INFO} The container ran out of disk space during installation (${GN}ENOSPC${CL})." + echo -e "${TAB}${INFO} Current disk size: ${GN}${DISK_SIZE} GB${CL}. A rebuild with doubled disk may resolve this." + echo "" + fi + + if [[ "$is_cmd_not_found" == true ]]; then + local missing_cmd="" + if [[ -f "$combined_log" ]]; then + missing_cmd=$(grep -oiE '[a-zA-Z0-9_.-]+: command not found' "$combined_log" 2>/dev/null | tail -1 | sed 's/: command not found//') || true + fi + if [[ -n "$missing_cmd" ]]; then + echo -e "${TAB}${INFO} Missing command: ${GN}${missing_cmd}${CL}" + fi + echo "" + fi + + # Build recovery menu based on error type + echo -e "${YW}What would you like to do?${CL}" + echo "" + echo -e " ${GN}1)${CL} Remove container and exit" + echo -e " ${GN}2)${CL} Keep container for debugging" + echo -e " ${GN}3)${CL} Retry with verbose mode (full rebuild)" + + local next_option=4 + local APT_OPTION="" OOM_OPTION="" DNS_OPTION="" DISK_OPTION="" + + if [[ "$is_apt_issue" == true ]]; then + if [[ "$var_os" == "alpine" ]]; then + echo -e " ${GN}${next_option})${CL} Repair APK state and re-run install (in-place)" + else + echo -e " ${GN}${next_option})${CL} Repair APT/DPKG state and re-run install (in-place)" + fi + APT_OPTION=$next_option + next_option=$((next_option + 1)) + fi + + if [[ "$is_oom" == true ]]; then + local recovery_attempt="${RECOVERY_ATTEMPT:-0}" + if [[ $recovery_attempt -lt 2 ]]; then + local new_ram=$((RAM_SIZE * 2)) + local new_cpu=$((CORE_COUNT * 2)) + echo -e " 
${GN}${next_option})${CL} Retry with more resources (RAM: ${RAM_SIZE}→${new_ram} MiB, CPU: ${CORE_COUNT}→${new_cpu} cores)" + OOM_OPTION=$next_option + next_option=$((next_option + 1)) + else + echo -e " ${DGN}-)${CL} ${DGN}OOM retry exhausted (already retried ${recovery_attempt}x)${CL}" + fi + fi + + if [[ "$is_disk_full" == true ]]; then + local disk_recovery_attempt="${DISK_RECOVERY_ATTEMPT:-0}" + if [[ $disk_recovery_attempt -lt 2 ]]; then + local new_disk=$((DISK_SIZE * 2)) + echo -e " ${GN}${next_option})${CL} Retry with more disk space (Disk: ${DISK_SIZE}→${new_disk} GB)" + DISK_OPTION=$next_option + next_option=$((next_option + 1)) + else + echo -e " ${DGN}-)${CL} ${DGN}Disk resize retry exhausted (already retried ${disk_recovery_attempt}x)${CL}" + fi + fi + + if [[ "$is_network_issue" == true ]]; then + echo -e " ${GN}${next_option})${CL} Retry with DNS override in LXC (8.8.8.8 / 1.1.1.1)" + DNS_OPTION=$next_option + next_option=$((next_option + 1)) + fi + + local max_option=$((next_option - 1)) + + echo "" + echo -en "${YW}Select option [1-${max_option}] (default: 1, auto-remove in 60s): ${CL}" + + local response="" + if read -t 60 -r response; then + case "${response:-1}" in + 1) + # Remove container + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + ;; + 2) + echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" + # Dev mode: Setup MOTD/SSH for debugging access to broken container + if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then + echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" + if pct exec "$CTID" -- bash -c " + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/install.func) + declare -f motd_ssh >/dev/null 2>&1 && motd_ssh || true + " >/dev/null 2>&1; then + local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null 
| awk '/inet / {print $2}' | cut -d/ -f1) + echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" + fi + fi + exit $install_exit_code + ;; + 3) + # Retry with verbose mode (full rebuild) + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + # Get new container ID + local old_ctid="$CTID" + export CTID=$(get_valid_container_id "$CTID") + export VERBOSE="yes" + export var_verbose="yes" + + # Show rebuild summary + echo -e "${YW}Rebuilding with preserved settings:${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " RAM: ${RAM_SIZE} MiB | CPU: ${CORE_COUNT} cores | Disk: ${DISK_SIZE} GB" + echo -e " Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" + msg_info "Restarting installation..." + # Re-run build_container + build_container + return $? 
+ ;; + *) + # Handle dynamic smart recovery options via named option variables + local handled=false + + if [[ -n "${APT_OPTION}" && "${response}" == "${APT_OPTION}" ]]; then + # Package manager in-place repair: fix broken state and re-run install script + handled=true + if [[ "$var_os" == "alpine" ]]; then + echo -e "\n${TAB}${HOLD}${YW}Repairing APK state in container ${CTID}...${CL}" + pct exec "$CTID" -- ash -c " + apk fix 2>/dev/null || true + apk cache clean 2>/dev/null || true + apk update 2>/dev/null || true + " >/dev/null 2>&1 || true + echo -e "${BFR}${CM}${GN}APK state repaired in container ${CTID}${CL}" + else + echo -e "\n${TAB}${HOLD}${YW}Repairing APT/DPKG state in container ${CTID}...${CL}" + pct exec "$CTID" -- bash -c " + DEBIAN_FRONTEND=noninteractive dpkg --configure -a 2>/dev/null || true + apt-get -f install -y 2>/dev/null || true + apt-get clean 2>/dev/null + apt-get update 2>/dev/null || true + " >/dev/null 2>&1 || true + echo -e "${BFR}${CM}${GN}APT/DPKG state repaired in container ${CTID}${CL}" + fi + echo "" + export VERBOSE="yes" + export var_verbose="yes" + + echo -e "${YW}Re-running installation in existing container ${CTID}:${CL}" + echo -e " RAM: ${RAM_SIZE} MiB | CPU: ${CORE_COUNT} cores | Disk: ${DISK_SIZE} GB" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" + msg_info "Re-running installation script..." + + # Re-run install script in existing container (don't destroy/recreate) + set +Eeuo pipefail + trap - ERR + lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" + local apt_retry_exit=$? 
+ set -Eeuo pipefail + trap 'error_handler' ERR + + # Check for error flag from retry + local apt_retry_code=0 + if [[ -n "${SESSION_ID:-}" ]]; then + local retry_error_flag="/root/.install-${SESSION_ID}.failed" + if pct exec "$CTID" -- test -f "$retry_error_flag" 2>/dev/null; then + apt_retry_code=$(pct exec "$CTID" -- cat "$retry_error_flag" 2>/dev/null || echo "1") + pct exec "$CTID" -- rm -f "$retry_error_flag" 2>/dev/null || true + fi + fi + + if [[ $apt_retry_code -eq 0 && $apt_retry_exit -ne 0 ]]; then + apt_retry_code=$apt_retry_exit + fi + + if [[ $apt_retry_code -eq 0 ]]; then + msg_ok "Installation completed successfully after APT repair!" + post_update_to_api "done" "0" "force" + return 0 + else + msg_error "Installation still failed after APT repair (exit code: ${apt_retry_code})" + install_exit_code=$apt_retry_code + fi + fi + + if [[ -n "${OOM_OPTION}" && "${response}" == "${OOM_OPTION}" ]]; then + # Retry with doubled resources + handled=true + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild with more resources...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + local old_ctid="$CTID" + local old_ram="$RAM_SIZE" + local old_cpu="$CORE_COUNT" + export CTID=$(get_valid_container_id "$CTID") + export RAM_SIZE=$((RAM_SIZE * 2)) + export CORE_COUNT=$((CORE_COUNT * 2)) + export var_ram="$RAM_SIZE" + export var_cpu="$CORE_COUNT" + export VERBOSE="yes" + export var_verbose="yes" + export RECOVERY_ATTEMPT=$((${RECOVERY_ATTEMPT:-0} + 1)) + + echo -e "${YW}Rebuilding with increased resources (attempt ${RECOVERY_ATTEMPT}/2):${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " RAM: ${old_ram} → ${GN}${RAM_SIZE}${CL} MiB (x2)" + echo -e " CPU: ${old_cpu} → ${GN}${CORE_COUNT}${CL} cores (x2)" + echo -e " Disk: ${DISK_SIZE} GB | Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" 
+ msg_info "Restarting installation..." + build_container + return $? + fi + + if [[ -n "${DISK_OPTION}" && "${response}" == "${DISK_OPTION}" ]]; then + # Retry with doubled disk size + handled=true + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild with more disk space...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + local old_ctid="$CTID" + local old_disk="$DISK_SIZE" + export CTID=$(get_valid_container_id "$CTID") + export DISK_SIZE=$((DISK_SIZE * 2)) + export var_disk="$DISK_SIZE" + export VERBOSE="yes" + export var_verbose="yes" + export DISK_RECOVERY_ATTEMPT=$((${DISK_RECOVERY_ATTEMPT:-0} + 1)) + + echo -e "${YW}Rebuilding with increased disk space (attempt ${DISK_RECOVERY_ATTEMPT}/2):${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " Disk: ${old_disk} → ${GN}${DISK_SIZE}${CL} GB (x2)" + echo -e " RAM: ${RAM_SIZE} MiB | CPU: ${CORE_COUNT} cores" + echo -e " Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" + msg_info "Restarting installation..." + build_container + return $? + fi + + if [[ -n "${DNS_OPTION}" && "${response}" == "${DNS_OPTION}" ]]; then + # Retry with DNS override in LXC + handled=true + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild with DNS override...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + local old_ctid="$CTID" + export CTID=$(get_valid_container_id "$CTID") + export DNS_RETRY_OVERRIDE="true" + export VERBOSE="yes" + export var_verbose="yes" + + echo -e "${YW}Rebuilding with DNS override in LXC:${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " DNS: ${GN}8.8.8.8, 1.1.1.1${CL} (inside LXC only)" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" + msg_info "Restarting installation..." 
+ build_container + return $? + fi + + if [[ "$handled" == false ]]; then + echo -e "\n${TAB}${YW}Invalid option. Container ${CTID} kept.${CL}" + exit $install_exit_code + fi + ;; + esac + else + # Timeout - auto-remove + echo "" + msg_info "No response - removing container ${CTID}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + msg_ok "Container ${CTID} removed" + fi + + # Force one final status update attempt after cleanup + # This ensures status is updated even if the first attempt failed (e.g., HTTP 400) + $STD echo -e "${TAB}⏳ Finalizing telemetry report..." + post_update_to_api "failed" "$install_exit_code" "force" + $STD echo -e "${TAB}${CM:-✔} Telemetry finalized" + + # Restore default job-control signal handling before exit + trap - TSTP TTIN TTOU + exit $install_exit_code + fi + + # Re-enable error handling after successful install or recovery menu completion + set -Eeuo pipefail + trap 'error_handler' ERR +} + +destroy_lxc() { + if [[ -z "$CT_ID" ]]; then + msg_error "No CT_ID found. Nothing to remove." + return 1 + fi + + # Abort on Ctrl-C / Ctrl-D / ESC + trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT + + local prompt + if ! read -rp "Remove this Container? " prompt /dev/null && pct destroy "$CT_ID" &>/dev/null; then + msg_ok "Removed Container $CT_ID" + else + msg_error "Failed to remove Container $CT_ID" + return 1 + fi + ;; + "" | n | no) + msg_custom "ℹ️" "${BL}" "Container was not removed." + ;; + *) + msg_warn "Invalid response. Container was not removed." 
+ ;; + esac +} + +# ------------------------------------------------------------------------------ +# Storage discovery / selection helpers +# ------------------------------------------------------------------------------ +resolve_storage_preselect() { + local class="$1" preselect="$2" required_content="" + case "$class" in + template) required_content="vztmpl" ;; + container) required_content="rootdir" ;; + *) return 1 ;; + esac + [[ -z "$preselect" ]] && return 1 + if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then + msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)" + return 1 + fi + + local line total used free + line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')" + if [[ -z "$line" ]]; then + STORAGE_INFO="n/a" + else + total="$(awk '{print $4}' <<<"$line")" + used="$(awk '{print $5}' <<<"$line")" + free="$(awk '{print $6}' <<<"$line")" + local total_h used_h free_h + if command -v numfmt >/dev/null 2>&1; then + total_h="$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")" + used_h="$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")" + free_h="$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")" + STORAGE_INFO="Free: ${free_h} Used: ${used_h}" + else + STORAGE_INFO="Free: ${free} Used: ${used}" + fi + fi + STORAGE_RESULT="$preselect" + return 0 +} + +fix_gpu_gids() { + if [[ -z "${GPU_TYPE:-}" ]]; then + return 0 + fi + + msg_info "Detecting and setting correct GPU group IDs" + + # Get actual GIDs from container + local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3") + local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3") + + # Create groups if they don't exist + if [[ -z "$video_gid" ]]; then + pct exec "$CTID" -- sh 
-c "groupadd -r video 2>/dev/null || true" >/dev/null 2>&1 + video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3") + [[ -z "$video_gid" ]] && video_gid="44" + fi + + if [[ -z "$render_gid" ]]; then + pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true" >/dev/null 2>&1 + render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3") + [[ -z "$render_gid" ]] && render_gid="104" + fi + + # Stop container to update config + pct stop "$CTID" >/dev/null 2>&1 + sleep 1 + + # Update dev entries with correct GIDs + sed -i.bak -E "s|(dev[0-9]+: /dev/dri/renderD[0-9]+),gid=[0-9]+|\1,gid=${render_gid}|g" "$LXC_CONFIG" + sed -i -E "s|(dev[0-9]+: /dev/dri/card[0-9]+),gid=[0-9]+|\1,gid=${video_gid}|g" "$LXC_CONFIG" + + # Restart container + pct start "$CTID" >/dev/null 2>&1 + sleep 2 + + msg_ok "GPU passthrough configured (video:${video_gid}, render:${render_gid})" + + # For privileged containers: also fix permissions inside container + if [[ "$CT_TYPE" == "0" ]]; then + pct exec "$CTID" -- sh -c " + if [ -d /dev/dri ]; then + for dev in /dev/dri/*; do + if [ -e \"\$dev\" ]; then + case \"\$dev\" in + *renderD*) chgrp ${render_gid} \"\$dev\" 2>/dev/null || true ;; + *) chgrp ${video_gid} \"\$dev\" 2>/dev/null || true ;; + esac + chmod 660 \"\$dev\" 2>/dev/null || true + fi + done + fi + " >/dev/null 2>&1 + fi +} + +check_storage_support() { + local CONTENT="$1" VALID=0 + while IFS= read -r line; do + local STORAGE_NAME + STORAGE_NAME=$(awk '{print $1}' <<<"$line") + [[ -n "$STORAGE_NAME" ]] && VALID=1 + done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1') + [[ $VALID -eq 1 ]] +} + +select_storage() { + local CLASS=$1 CONTENT CONTENT_LABEL + case $CLASS in + container) + CONTENT='rootdir' + CONTENT_LABEL='Container' + ;; + template) + CONTENT='vztmpl' + CONTENT_LABEL='Container template' + ;; + iso) + CONTENT='iso' + CONTENT_LABEL='ISO image' + ;; + images) + CONTENT='images' + CONTENT_LABEL='VM 
Disk image' + ;; + backup) + CONTENT='backup' + CONTENT_LABEL='Backup' + ;; + snippets) + CONTENT='snippets' + CONTENT_LABEL='Snippets' + ;; + *) + msg_error "Invalid storage class '$CLASS'" + return 1 + ;; + esac + + declare -A STORAGE_MAP + local -a MENU=() + local COL_WIDTH=0 + + while read -r TAG TYPE _ TOTAL USED FREE _; do + [[ -n "$TAG" && -n "$TYPE" ]] || continue + local DISPLAY="${TAG} (${TYPE})" + local USED_FMT=$(numfmt --to=iec --from-unit=1024 --format %.1f <<<"$USED") + local FREE_FMT=$(numfmt --to=iec --from-unit=1024 --format %.1f <<<"$FREE") + local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B" + STORAGE_MAP["$DISPLAY"]="$TAG" + MENU+=("$DISPLAY" "$INFO" "OFF") + ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY} + done < <(pvesm status -content "$CONTENT" | awk 'NR>1') + + if [[ ${#MENU[@]} -eq 0 ]]; then + msg_error "No storage found for content type '$CONTENT'." + return 2 + fi + + if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then + STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}" + STORAGE_INFO="${MENU[1]}" + return 0 + fi + + local WIDTH=$((COL_WIDTH + 42)) + while true; do + local DISPLAY_SELECTED + DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "Storage Pools" \ + --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \ + 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; } + + DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED") + if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then + whiptail --msgbox "No valid storage selected. Please try again." 
8 58 + continue + fi + STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}" + for ((i = 0; i < ${#MENU[@]}; i += 3)); do + if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then + STORAGE_INFO="${MENU[$i + 1]}" + break + fi + done + + # Validate storage space for container storage + if [[ "$CLASS" == "container" && -n "${DISK_SIZE:-}" ]]; then + validate_storage_space "$STORAGE_RESULT" "$DISK_SIZE" "yes" + # Continue even if validation fails - user was warned + fi + + return 0 + done +} + +# ------------------------------------------------------------------------------ +# validate_storage_space() +# +# - Validates if storage has enough free space for container +# - Takes storage name and required size in GB +# - Returns 0 if enough space, 1 if not enough, 2 if storage unavailable +# - Can optionally show whiptail warning +# - Handles all storage types: dir, lvm, lvmthin, zfs, nfs, cifs, etc. +# ------------------------------------------------------------------------------ +validate_storage_space() { + local storage="$1" + local required_gb="${2:-8}" + local show_dialog="${3:-no}" + + # Get full storage line from pvesm status + local storage_line + storage_line=$(pvesm status 2>/dev/null | awk -v s="$storage" '$1 == s {print $0}') + + # Check if storage exists and is active + if [[ -z "$storage_line" ]]; then + [[ "$show_dialog" == "yes" ]] && whiptail --msgbox "⚠️ Warning: Storage '$storage' not found!\n\nThe storage may be unavailable or disabled." 10 60 + return 2 + fi + + # Check storage status (column 3) + local status + status=$(awk '{print $3}' <<<"$storage_line") + if [[ "$status" == "disabled" ]]; then + [[ "$show_dialog" == "yes" ]] && whiptail --msgbox "⚠️ Warning: Storage '$storage' is disabled!\n\nPlease enable the storage first." 
10 60 + return 2 + fi + + # Get storage type and free space (column 6) + local storage_type storage_free + storage_type=$(awk '{print $2}' <<<"$storage_line") + storage_free=$(awk '{print $6}' <<<"$storage_line") + + # Some storage types (like PBS, iSCSI) don't report size info + # In these cases, skip space validation + if [[ -z "$storage_free" || "$storage_free" == "0" ]]; then + # Silent pass for storages without size info + return 0 + fi + + local required_kb=$((required_gb * 1024 * 1024)) + local free_gb_fmt + free_gb_fmt=$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$storage_free" 2>/dev/null || echo "${storage_free}KB") + + if [[ "$storage_free" -lt "$required_kb" ]]; then + if [[ "$show_dialog" == "yes" ]]; then + whiptail --msgbox "⚠️ Warning: Storage '$storage' may not have enough space!\n\nStorage Type: ${storage_type}\nRequired: ${required_gb}GB\nAvailable: ${free_gb_fmt}\n\nYou can continue, but creation might fail." 14 70 + fi + return 1 + fi + + return 0 +} + +# ============================================================================== +# SECTION 8: CONTAINER CREATION +# ============================================================================== + +# ------------------------------------------------------------------------------ +# create_lxc_container() +# +# - Main function for creating LXC containers +# - Handles all phases: validation, template discovery, container creation, +# network config, storage, etc. 
+# - Extensive error checking with detailed exit codes +# ------------------------------------------------------------------------------ +create_lxc_container() { + # ------------------------------------------------------------------------------ + # Optional verbose mode (debug tracing) + # ------------------------------------------------------------------------------ + if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi + + # ------------------------------------------------------------------------------ + # Helpers (dynamic versioning / template parsing) + # ------------------------------------------------------------------------------ + pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; } + pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; } + + ver_ge() { dpkg --compare-versions "$1" ge "$2"; } + ver_gt() { dpkg --compare-versions "$1" gt "$2"; } + ver_lt() { dpkg --compare-versions "$1" lt "$2"; } + + # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1" + parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; } + + # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create + # Returns: + # 0 = no upgrade needed + # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done) + # 2 = user declined + # 3 = upgrade attempted but failed OR retry failed + offer_lxc_stack_upgrade_and_maybe_retry() { + local do_retry="${1:-no}" # yes|no + local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0 + + _pvec_i="$(pkg_ver pve-container)" + _lxcp_i="$(pkg_ver lxc-pve)" + _pvec_c="$(pkg_cand pve-container)" + _lxcp_c="$(pkg_cand lxc-pve)" + + if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then + ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1 + fi + if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then + ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1 + fi + if [[ $need -eq 0 ]]; then + msg_debug "No newer 
candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)" + return 0 + fi + + msg_info "An update for the Proxmox LXC stack is available" + echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}" + echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}" + echo + read -rp "Do you want to upgrade now? [y/N] " _ans >"$LOGFILE" 2>&1; then + msg_ok "Container created successfully after upgrade." + return 0 + else + msg_error "pct create still failed after upgrade. See $LOGFILE" + return 3 + fi + fi + return 1 + else + msg_error "Upgrade failed. Please check APT output." + return 3 + fi + ;; + *) return 2 ;; + esac + } + + # ------------------------------------------------------------------------------ + # Required input variables + # ------------------------------------------------------------------------------ + [[ "${CTID:-}" ]] || { + msg_error "You need to set 'CTID' variable." + exit 203 + } + [[ "${PCT_OSTYPE:-}" ]] || { + msg_error "You need to set 'PCT_OSTYPE' variable." + exit 204 + } + + msg_debug "CTID=$CTID" + msg_debug "PCT_OSTYPE=$PCT_OSTYPE" + msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}" + + # ID checks + [[ "$CTID" -ge 100 ]] || { + msg_error "ID cannot be less than 100." + exit 205 + } + if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then + unset CTID + msg_error "Cannot use ID that is already in use." 
+ exit 206 + fi + + # Report installation start to API early - captures failures in storage/template/create + post_to_api + + # Transition to 'validation' — Proxmox-internal checks (storage, template, cluster) + post_progress_to_api "validation" + + # Storage capability check + check_storage_support "rootdir" || { + msg_error "No valid storage found for 'rootdir' [Container]" + exit 119 + } + check_storage_support "vztmpl" || { + msg_error "No valid storage found for 'vztmpl' [Template]" + exit 120 + } + + # Template storage selection + if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then + TEMPLATE_STORAGE="$STORAGE_RESULT" + TEMPLATE_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]" + else + while true; do + if [[ -z "${var_template_storage:-}" ]]; then + if select_storage template; then + TEMPLATE_STORAGE="$STORAGE_RESULT" + TEMPLATE_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]" + break + fi + fi + done + fi + + # Container storage selection + if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then + CONTAINER_STORAGE="$STORAGE_RESULT" + CONTAINER_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]" + else + if [[ -z "${var_container_storage:-}" ]]; then + if select_storage container; then + CONTAINER_STORAGE="$STORAGE_RESULT" + CONTAINER_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]" + fi + fi + fi + + msg_info "Validating storage '$CONTAINER_STORAGE'" + STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 | head -1) + + if [[ -z "$STORAGE_TYPE" ]]; then + msg_error "Storage '$CONTAINER_STORAGE' not found in /etc/pve/storage.cfg" + exit 213 + fi + + case "$STORAGE_TYPE" in + iscsidirect) + msg_error "Storage '$CONTAINER_STORAGE' 
uses iSCSI-direct which does not support container rootfs." + exit 212 + ;; + iscsi | zfs) + msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) does not support container rootdir content." + exit 213 + ;; + cephfs) + msg_error "Storage '$CONTAINER_STORAGE' uses CephFS which is not supported for LXC rootfs." + exit 219 + ;; + pbs) + msg_error "Storage '$CONTAINER_STORAGE' is a Proxmox Backup Server — cannot be used for containers." + exit 224 + ;; + linstor | rbd | nfs | cifs) + if ! pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null; then + msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) is not accessible or inactive." + exit 217 + fi + ;; + esac + + if ! pvesm status -content rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$CONTAINER_STORAGE"; then + msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) does not support 'rootdir' content." + exit 213 + fi + msg_ok "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) validated" + + msg_info "Validating template storage '$TEMPLATE_STORAGE'" + TEMPLATE_TYPE=$(grep -E "^[^:]+: $TEMPLATE_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1) + + if ! pvesm status -content vztmpl 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$TEMPLATE_STORAGE"; then + msg_warn "Template storage '$TEMPLATE_STORAGE' may not support 'vztmpl'" + fi + msg_ok "Template storage '$TEMPLATE_STORAGE' validated" + + # Cluster quorum (if cluster) + if [[ -f /etc/pve/corosync.conf ]]; then + msg_info "Checking cluster quorum" + if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then + msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)." 
+ exit 210 + fi + msg_ok "Cluster is quorate" + fi + + # ------------------------------------------------------------------------------ + # Template discovery & validation + # ------------------------------------------------------------------------------ + TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" + case "$PCT_OSTYPE" in + debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;; + alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;; + *) TEMPLATE_PATTERN="" ;; + esac + + msg_info "Searching for template '$TEMPLATE_SEARCH'" + + # Initialize variables + ONLINE_TEMPLATE="" + ONLINE_TEMPLATES=() + + # Step 1: Check local templates first (instant) + mapfile -t LOCAL_TEMPLATES < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk -v search="${TEMPLATE_SEARCH}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + sed 's|.*/||' | sort -t - -k 2 -V + ) + + # Step 2: If local template found, use it immediately (skip pveam update) + if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${LOCAL_TEMPLATES[-1]}" + TEMPLATE_SOURCE="local" + msg_ok "Template search completed" + else + # Step 3: No local template - need to check online (this may be slow) + msg_info "No local template found, checking online catalog..." + + # Update catalog with timeout to prevent long hangs + if command -v timeout &>/dev/null; then + if ! timeout 30 pveam update >/dev/null 2>&1; then + msg_warn "Template catalog update timed out (possible network/DNS issue). Run 'pveam update' manually to diagnose." 
+ fi + else + pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)" + fi + + ONLINE_TEMPLATES=() + mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "^${TEMPLATE_SEARCH}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true) + [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" + + TEMPLATE="$ONLINE_TEMPLATE" + TEMPLATE_SOURCE="online" + msg_ok "Template search completed" + fi + + # If still no template, try to find alternatives + if [[ -z "$TEMPLATE" ]]; then + msg_warn "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..." + + # Get all available versions for this OS type + AVAILABLE_VERSIONS=() + mapfile -t AVAILABLE_VERSIONS < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk -F'\t' '{print $1}' | + grep "^${PCT_OSTYPE}-" | + sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" | + sort -u -V 2>/dev/null + ) + + if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then + echo "" + echo "${BL}Available ${PCT_OSTYPE} versions:${CL}" + for i in "${!AVAILABLE_VERSIONS[@]}"; do + echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" + done + echo "" + read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice /dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk '{print $2}' | + grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | + sort -t - -k 2 -V 2>/dev/null || true + ) + + if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${ONLINE_TEMPLATES[-1]}" + TEMPLATE_SOURCE="online" + else + msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}" + exit 225 + fi + else + msg_custom "🚫" "${YW}" "Installation cancelled" + exit 0 + fi + else + msg_error "No ${PCT_OSTYPE} templates available at all" + exit 225 + fi + fi + + TEMPLATE_PATH="$(pvesm path 
$TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" + if [[ -z "$TEMPLATE_PATH" ]]; then + TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) + [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" + fi + + # If we still don't have a path but have a valid template name, construct it + if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then + TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + fi + + [[ -n "$TEMPLATE_PATH" ]] || { + if [[ -z "$TEMPLATE" ]]; then + msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available" + + # Get available versions + mapfile -t AVAILABLE_VERSIONS < <( + pveam available -section system 2>/dev/null | + grep "^${PCT_OSTYPE}-" | + sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' | + grep -E '^[0-9]+\.[0-9]+$' | + sort -u -V 2>/dev/null || sort -u + ) + + if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then + echo -e "\n${BL}Available versions:${CL}" + for i in "${!AVAILABLE_VERSIONS[@]}"; do + echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" + done + + echo "" + read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice /dev/null | + awk -v search="${TEMPLATE_SEARCH}-" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + sed 's|.*/||' | sort -t - -k 2 -V + ) + mapfile -t ONLINE_TEMPLATES < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk '{print $2}' | + grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | + sort -t - -k 2 -V 2>/dev/null || true + ) + ONLINE_TEMPLATE="" + [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" + + if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${LOCAL_TEMPLATES[-1]}" + TEMPLATE_SOURCE="local" + else + TEMPLATE="$ONLINE_TEMPLATE" + TEMPLATE_SOURCE="online" + fi + + TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" + if [[ -z 
"$TEMPLATE_PATH" ]]; then + TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) + [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" + fi + + # If we still don't have a path but have a valid template name, construct it + if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then + TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + fi + + [[ -n "$TEMPLATE_PATH" ]] || { + msg_error "Template still not found after version change" + exit 220 + } + else + msg_custom "🚫" "${YW}" "Installation cancelled" + exit 0 + fi + else + msg_error "No ${PCT_OSTYPE} templates available" + exit 220 + fi + fi + } + + # Validate that we found a template + if [[ -z "$TEMPLATE" ]]; then + msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}" + msg_custom "ℹ️" "${YW}" "Please check:" + msg_custom " •" "${YW}" "Is pveam catalog available? (run: pveam available -section system)" + msg_custom " •" "${YW}" "Does the template exist for your OS version?" + exit 225 + fi + + msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]" + msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH" + + NEED_DOWNLOAD=0 + if [[ ! -f "$TEMPLATE_PATH" ]]; then + msg_info "Template not present locally – will download." + NEED_DOWNLOAD=1 + elif [[ ! -r "$TEMPLATE_PATH" ]]; then + msg_error "Template file exists but is not readable – check permissions." + exit 221 + elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_warn "Template file too small (<1MB) – re-downloading." + NEED_DOWNLOAD=1 + else + msg_warn "Template looks too small, but no online version exists. Keeping local file." + fi + elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_warn "Template appears corrupted – re-downloading." + NEED_DOWNLOAD=1 + else + msg_warn "Template appears corrupted, but no online version exists. Keeping local file." 
+ fi + else + $STD msg_ok "Template $TEMPLATE is present and valid." + fi + + if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then + msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)" + if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then + TEMPLATE="$ONLINE_TEMPLATE" + NEED_DOWNLOAD=1 + else + msg_custom "ℹ️" "${BL}" "Continuing with local template $TEMPLATE" + fi + fi + + if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then + [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH" + for attempt in {1..3}; do + msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE" + if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1; then + msg_ok "Template download successful." + break + fi + if [[ $attempt -eq 3 ]]; then + msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE" + exit 222 + fi + sleep $((attempt * 5)) + done + fi + + if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then + msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download." 
+ exit 223 + fi + + # ------------------------------------------------------------------------------ + # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins) + # ------------------------------------------------------------------------------ + if [[ "$PCT_OSTYPE" == "debian" ]]; then + OSVER="$(parse_template_osver "$TEMPLATE")" + if [[ -n "$OSVER" ]]; then + offer_lxc_stack_upgrade_and_maybe_retry "no" || true + fi + fi + + # ------------------------------------------------------------------------------ + # Create LXC Container + # ------------------------------------------------------------------------------ + msg_info "Creating LXC container" + + # Ensure subuid/subgid entries exist + grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid + grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid + + # PCT_OPTIONS is now a string (exported from build_container) + # Add rootfs if not already specified + if [[ ! "$PCT_OPTIONS" =~ "-rootfs" ]]; then + PCT_OPTIONS="$PCT_OPTIONS + -rootfs $CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}" + fi + + # Lock by template file (avoid concurrent template downloads/validation) + lockfile="/tmp/template.${TEMPLATE}.lock" + + # Cleanup stale lock files (older than 1 hour - likely from crashed processes) + if [[ -f "$lockfile" ]]; then + local lock_age=$(($(date +%s) - $(stat -c %Y "$lockfile" 2>/dev/null || echo 0))) + if [[ $lock_age -gt 3600 ]]; then + msg_warn "Removing stale template lock file (age: ${lock_age}s)" + rm -f "$lockfile" + fi + fi + + exec 9>"$lockfile" || { + msg_error "Failed to create lock file '$lockfile'." + exit 200 + } + + # Retry logic for template lock (another container creation may be running) + local lock_attempts=0 + local max_lock_attempts=10 + local lock_wait_time=30 + + while ! 
flock -w "$lock_wait_time" 9; do + lock_attempts=$((lock_attempts + 1)) + if [[ $lock_attempts -ge $max_lock_attempts ]]; then + msg_error "Timeout while waiting for template lock after ${max_lock_attempts} attempts." + msg_custom "💡" "${YW}" "Another container creation may be stuck. Check running processes or remove: $lockfile" + exit 211 + fi + msg_custom "⏳" "${YW}" "Another container is being created with this template. Waiting... (attempt ${lock_attempts}/${max_lock_attempts})" + done + + LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log" + + # Helper: append pct_create log to BUILD_LOG before exit so combined log has full context + _flush_pct_log() { + if [[ -s "${LOGFILE:-}" && -n "${BUILD_LOG:-}" ]]; then + { + echo "" + echo "--- pct create output (${LOGFILE}) ---" + cat "$LOGFILE" + echo "--- end pct create output ---" + } >>"$BUILD_LOG" 2>/dev/null || true + fi + } + + # Validate template before pct create (while holding lock) + if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH" 2>/dev/null || echo 0)" -lt 1000000 ]]; then + msg_info "Template file missing or too small – downloading" + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || { + msg_error "Failed to download template '$TEMPLATE' to storage '$TEMPLATE_STORAGE'" + exit 222 + } + msg_ok "Template downloaded" + elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_info "Template appears corrupted – re-downloading" + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || { + msg_error "Failed to re-download template '$TEMPLATE'" + exit 222 + } + msg_ok "Template re-downloaded" + else + msg_warn "Template appears corrupted, but no online version exists. Skipping re-download." 
+ fi + fi + + # Release lock after template validation - pct create has its own internal locking + exec 9>&- + + msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" + msg_debug "Logfile: $LOGFILE" + + # First attempt (PCT_OPTIONS is a multi-line string, use it directly) + if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then + msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Checking error..." + + # Check if template issue - retry with fresh download + if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then + msg_info "Template may be corrupted – re-downloading" + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 + msg_ok "Template re-downloaded" + fi + + # Retry after repair + if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + # Fallback to local storage if not already on local + if [[ "$TEMPLATE_STORAGE" != "local" ]]; then + msg_info "Retrying container creation with fallback to local storage" + LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then + msg_ok "Trying local storage fallback" + msg_info "Downloading template to local" + pveam download local "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 + msg_ok "Template downloaded to local" + else + msg_ok "Trying local storage fallback" + fi + if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + # Local fallback also failed - check for LXC stack version issue + if grep -qiE 'unsupported .* version' "$LOGFILE"; then + msg_warn "pct reported 'unsupported version' – LXC stack might be too old for this template" + offer_lxc_stack_upgrade_and_maybe_retry "yes" + rc=$? + case $rc in + 0) : ;; # success - container created, continue + 2) + msg_error "Upgrade declined. 
Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve" + _flush_pct_log + exit 231 + ;; + 3) + msg_error "Upgrade and/or retry failed. Please inspect: $LOGFILE" + _flush_pct_log + exit 231 + ;; + esac + else + msg_error "Container creation failed. See $LOGFILE" + if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then + set -x + pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" + set +x + fi + _flush_pct_log + exit 209 + fi + else + msg_ok "Container successfully created using local fallback." + fi + else + # Already on local storage and still failed - check LXC stack version + if grep -qiE 'unsupported .* version' "$LOGFILE"; then + msg_warn "pct reported 'unsupported version' – LXC stack might be too old for this template" + offer_lxc_stack_upgrade_and_maybe_retry "yes" + rc=$? + case $rc in + 0) : ;; # success - container created, continue + 2) + msg_error "Upgrade declined. Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve" + _flush_pct_log + exit 231 + ;; + 3) + msg_error "Upgrade and/or retry failed. Please inspect: $LOGFILE" + _flush_pct_log + exit 231 + ;; + esac + else + msg_error "Container creation failed. See $LOGFILE" + if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then + set -x + pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" + set +x + fi + _flush_pct_log + exit 209 + fi + fi + else + msg_ok "Container successfully created after template repair." + fi + fi + + # Verify container exists + pct list | awk '{print $1}' | grep -qx "$CTID" || { + msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE" + _flush_pct_log + exit 215 + } + + # Verify config rootfs + grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || { + msg_error "RootFS entry missing in container config. 
See $LOGFILE" + _flush_pct_log + exit 216 + } + + msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created." + + # Append pct create log to BUILD_LOG for combined log visibility + if [[ -s "$LOGFILE" && -n "${BUILD_LOG:-}" ]]; then + { + echo "" + echo "--- pct create output ---" + cat "$LOGFILE" + echo "--- end pct create output ---" + } >>"$BUILD_LOG" 2>/dev/null || true + fi +} + +# ============================================================================== +# SECTION 9: POST-INSTALLATION & FINALIZATION +# ============================================================================== + +# ------------------------------------------------------------------------------ +# description() +# +# - Sets container description with formatted HTML content +# - Includes: +# * Community-Scripts logo +# * Application name +# * Links to GitHub, Discussions, Issues +# * Ko-fi donation badge +# - Restarts ping-instances.service if present (monitoring) +# - Posts final "done" status to API telemetry +# ------------------------------------------------------------------------------ +description() { + IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) + + # Generate LXC Description + DESCRIPTION=$( + cat < + + Logo + + +

${APP} LXC

+ +

+ + spend Coffee + +

+ + + + GitHub + + + + Discussions + + + + Issues + + +EOF + ) + pct set "$CTID" -description "$DESCRIPTION" + + if [[ -f /etc/systemd/system/ping-instances.service ]]; then + systemctl start ping-instances.service + fi + + post_update_to_api "done" "none" +} + +# ============================================================================== +# SECTION 10: ERROR HANDLING & EXIT TRAPS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# ensure_log_on_host() +# +# - Ensures INSTALL_LOG points to a readable file on the host +# - If INSTALL_LOG points to a container path (e.g. /root/.install-*), +# tries to pull it from the container and create a combined log +# - This allows get_error_text() to find actual error output for telemetry +# - Uses timeout on pct pull to prevent hangs on dead/unresponsive containers +# ------------------------------------------------------------------------------ +ensure_log_on_host() { + # Already readable on host? Nothing to do. + [[ -n "${INSTALL_LOG:-}" && -s "${INSTALL_LOG}" ]] && return 0 + + # Try pulling from container and creating combined log + if [[ -n "${CTID:-}" && -n "${SESSION_ID:-}" ]] && command -v pct &>/dev/null; then + local combined_log="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log" + if [[ ! 
-s "$combined_log" ]]; then + # Create combined log + { + echo "================================================================================" + echo "COMBINED INSTALLATION LOG - ${APP:-LXC}" + echo "Container ID: ${CTID}" + echo "Session ID: ${SESSION_ID}" + echo "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')" + echo "================================================================================" + echo "" + } >"$combined_log" 2>/dev/null || return 0 + # Append BUILD_LOG if it exists + if [[ -f "${BUILD_LOG:-}" ]]; then + { + echo "================================================================================" + echo "PHASE 1: CONTAINER CREATION (Host)" + echo "================================================================================" + cat "${BUILD_LOG}" + echo "" + } >>"$combined_log" + fi + # Pull INSTALL_LOG from container (with timeout to prevent hangs on dead containers) + local temp_log="/tmp/.install-temp-${SESSION_ID}.log" + if timeout 8 pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_log" 2>/dev/null; then + { + echo "================================================================================" + echo "PHASE 2: APPLICATION INSTALLATION (Container)" + echo "================================================================================" + cat "$temp_log" + echo "" + } >>"$combined_log" + rm -f "$temp_log" + fi + fi + if [[ -s "$combined_log" ]]; then + INSTALL_LOG="$combined_log" + fi + fi +} + +# ============================================================================== +# TRAP MANAGEMENT +# ============================================================================== +# All traps (ERR, EXIT, INT, TERM, HUP) are set by catch_errors() in +# error_handler.func — called at the top of this file after sourcing. +# +# Do NOT set duplicate traps here. 
The handlers in error_handler.func +# (on_exit, on_interrupt, on_terminate, on_hangup, error_handler) already: +# - Send telemetry via post_update_to_api / _send_abort_telemetry +# - Stop orphaned containers via _stop_container_if_installing +# - Collect logs via ensure_log_on_host +# - Clean up lock files and spinner processes +# +# Previously, inline traps here overwrote catch_errors() traps, causing: +# - error_handler() never fired (no error output, no cleanup dialog) +# - on_hangup() never fired (SSH disconnect → stuck records) +# - Duplicated logic in two places (hard to debug) +# ============================================================================== diff --git a/misc/core.func b/misc/core.func new file mode 100644 index 0000000..e3b9d2c --- /dev/null +++ b/misc/core.func @@ -0,0 +1,1719 @@ +#!/usr/bin/env bash +# Copyright (c) 2021-2026 community-scripts ORG +# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE + +# ============================================================================== +# CORE FUNCTIONS - LXC CONTAINER UTILITIES +# ============================================================================== +# +# This file provides core utility functions for LXC container management +# including colors, formatting, validation checks, message output, and +# execution helpers used throughout the Community-Scripts ecosystem. 
# ==============================================================================
# Usage:
#   source <(curl -fsSL https://git.community-scripts.org/.../core.func)
#   load_functions
# ==============================================================================

# Source-once guard: make repeated sourcing of this file a no-op.
[[ -n "${_CORE_FUNC_LOADED:-}" ]] && return
_CORE_FUNC_LOADED=1

# ==============================================================================
# SECTION 1: INITIALIZATION & SETUP
# ==============================================================================

# ------------------------------------------------------------------------------
# load_functions()
#
# Entry point for consumers of this file: initializes colors, formatting
# helpers, icon glyphs, retry defaults and the verbose/silent output mode.
# Guarded by __FUNCTIONS_LOADED so repeated calls do nothing.
# ------------------------------------------------------------------------------
load_functions() {
  [[ -n "${__FUNCTIONS_LOADED:-}" ]] && return
  __FUNCTIONS_LOADED=1
  color
  formatting
  icons
  default_vars
  set_std_mode
}

# ------------------------------------------------------------------------------
# color()
#
# Defines the ANSI color variables used by the msg_* helpers.
# NOTE: most values are stored as *literal* "\033[..m" text and are rendered
# later by `echo -e` (bash echo without -e does not interpret backslashes, so
# the original `$(echo "\033[33m")` produced the literal string — the direct
# assignments below are byte-identical, minus the useless subshell).
# YWB is the one exception: it holds a real ESC byte via $'…'.
# ------------------------------------------------------------------------------
color() {
  YW='\033[33m'    # yellow
  YWB=$'\e[93m'    # bright yellow (real escape byte)
  BL='\033[36m'    # blue/cyan
  RD='\033[01;31m' # bold red
  BGN='\033[4;92m' # underlined bright green
  GN='\033[1;92m'  # bold bright green
  DGN='\033[32m'   # dark green
  CL='\033[m'      # reset
}
color_spinner() {
  # Real escape bytes (not literal "\033" text) — the spinner prints with
  # printf %s, so it needs actual ESC characters.
  CS_YW=$'\033[33m'  # spinner yellow
  CS_YWB=$'\033[93m' # spinner bright yellow
  CS_CL=$'\033[m'    # spinner reset
}

# ------------------------------------------------------------------------------
# formatting()
#
# Terminal formatting helpers:
#   BFR  - carriage return + erase-to-end-of-line (clears the spinner line)
#   BOLD - bold text escape (literal "\033" text, rendered via echo -e)
#   TAB/TAB3/HOLD - indentation spacing
# NOTE(review): the exact widths of HOLD/TAB/TAB3 may have been collapsed by
# whitespace normalization in this copy — confirm against upstream core.func.
# ------------------------------------------------------------------------------
formatting() {
  BFR="\\r\\033[K"
  BOLD='\033[1m' # literal text; original $(echo "\033[1m") yields the same bytes
  HOLD=" "
  TAB=" "
  TAB3=" "
}

# ------------------------------------------------------------------------------
# icons()
#
# Emoji indicators used throughout user feedback. Each value embeds TAB (from
# formatting()) and CL (from color()), so those must be defined first —
# load_functions() guarantees the ordering.
# ------------------------------------------------------------------------------
icons() {
  CM="${TAB}✔️${TAB}"
  CROSS="${TAB}✖️${TAB}"
  DNSOK="✔️ "
  DNSFAIL="${TAB}✖️${TAB}"
  INFO="${TAB}💡${TAB}${CL}"
  OS="${TAB}🖥️${TAB}${CL}"
  OSVERSION="${TAB}🌟${TAB}${CL}"
  CONTAINERTYPE="${TAB}📦${TAB}${CL}"
  DISKSIZE="${TAB}💾${TAB}${CL}"
  CPUCORE="${TAB}🧠${TAB}${CL}"
  RAMSIZE="${TAB}🛠️${TAB}${CL}"
  SEARCH="${TAB}🔍${TAB}${CL}"
  VERBOSE_CROPPED="🔍${TAB}"
  VERIFYPW="${TAB}🔐${TAB}${CL}"
  CONTAINERID="${TAB}🆔${TAB}${CL}"
  HOSTNAME="${TAB}🏠${TAB}${CL}"
  BRIDGE="${TAB}🌉${TAB}${CL}"
  NETWORK="${TAB}📡${TAB}${CL}"
  GATEWAY="${TAB}🌐${TAB}${CL}"
  ICON_DISABLEIPV6="${TAB}🚫${TAB}${CL}"
  DEFAULT="${TAB}⚙️${TAB}${CL}"
  MACADDRESS="${TAB}🔗${TAB}${CL}"
  VLANTAG="${TAB}🏷️${TAB}${CL}"
  ROOTSSH="${TAB}🔑${TAB}${CL}"
  CREATING="${TAB}🚀${TAB}${CL}"
  ADVANCED="${TAB}🧩${TAB}${CL}"
  FUSE="${TAB}🗂️${TAB}${CL}"
  GPU="${TAB}🎮${TAB}${CL}"
  HOURGLASS="${TAB}⏳${TAB}"
}
# ------------------------------------------------------------------------------
# ensure_profile_loaded()
#
# Sources /etc/profile.d/*.sh when running in a non-login shell (e.g. via
# `pct enter`/`pct exec`), where PATH may be incomplete. No-op on the Proxmox
# host (detected via pveversion) and on repeat calls (_PROFILE_LOADED guard).
# ------------------------------------------------------------------------------
ensure_profile_loaded() {
  [[ -n "${_PROFILE_LOADED:-}" ]] && return
  command -v pveversion &>/dev/null && return

  if [[ -d /etc/profile.d ]]; then
    local script
    for script in /etc/profile.d/*.sh; do
      [[ -r "$script" ]] && source "$script"
    done
  fi

  # Common install location for third-party binaries; prepend only if missing.
  if [[ ":$PATH:" != *":/usr/local/bin:"* ]]; then
    export PATH="/usr/local/bin:$PATH"
  fi

  export _PROFILE_LOADED=1
}

# ------------------------------------------------------------------------------
# default_vars()
#
# Retry defaults used by wait/poll loops:
#   RETRY_NUM   - maximum number of attempts (10)
#   RETRY_EVERY - seconds between attempts (3)
#   i           - live counter, initialized to RETRY_NUM
# ------------------------------------------------------------------------------
default_vars() {
  RETRY_NUM=10
  RETRY_EVERY=3
  i=$RETRY_NUM
}

# ------------------------------------------------------------------------------
# set_std_mode()
#
# Chooses the output mode for wrapped commands:
#   VERBOSE=yes -> STD=""       (commands run with full output)
#   otherwise   -> STD="silent" (commands run through the silent() wrapper)
# Additionally enables bash tracing (set -x) when DEV_MODE_TRACE=true.
# ------------------------------------------------------------------------------
set_std_mode() {
  if [[ "${VERBOSE:-no}" == "yes" ]]; then
    STD=""
  else
    STD="silent"
  fi

  if [[ "${DEV_MODE_TRACE:-false}" == "true" ]]; then
    set -x
    export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
  fi
}

# ------------------------------------------------------------------------------
# parse_dev_mode()
#
# Parses the comma-separated dev_mode variable (e.g. "motd,keep,trace") and
# exports one boolean flag per mode:
#   DEV_MODE_MOTD / KEEP / TRACE / PAUSE / BREAKPOINT / LOGS / DRYRUN
# Unknown modes are warned about and ignored. Call early in script execution.
# ------------------------------------------------------------------------------
parse_dev_mode() {
  local mode
  export DEV_MODE_MOTD=false DEV_MODE_KEEP=false DEV_MODE_TRACE=false \
    DEV_MODE_PAUSE=false DEV_MODE_BREAKPOINT=false DEV_MODE_LOGS=false \
    DEV_MODE_DRYRUN=false

  # Explicit if (not `[[ ]] && return`) so a false guard cannot trip `set -e`.
  if [[ -z "${dev_mode:-}" ]]; then
    return 0
  fi

  local -a MODES
  IFS=',' read -ra MODES <<<"$dev_mode"
  for mode in "${MODES[@]}"; do
    mode="$(echo "$mode" | xargs)" # trim surrounding whitespace
    case "$mode" in
      motd) export DEV_MODE_MOTD=true ;;
      keep) export DEV_MODE_KEEP=true ;;
      trace) export DEV_MODE_TRACE=true ;;
      pause) export DEV_MODE_PAUSE=true ;;
      breakpoint) export DEV_MODE_BREAKPOINT=true ;;
      logs) export DEV_MODE_LOGS=true ;;
      dryrun) export DEV_MODE_DRYRUN=true ;;
      *)
        if declare -f msg_warn >/dev/null 2>&1; then
          msg_warn "Unknown dev_mode: '$mode' (ignored)"
        else
          echo "[WARN] Unknown dev_mode: '$mode' (ignored)" >&2
        fi
        ;;
    esac
  done

  # Report which dev modes ended up active.
  local active_modes=()
  [[ $DEV_MODE_MOTD == true ]] && active_modes+=("motd")
  [[ $DEV_MODE_KEEP == true ]] && active_modes+=("keep")
  [[ $DEV_MODE_TRACE == true ]] && active_modes+=("trace")
  [[ $DEV_MODE_PAUSE == true ]] && active_modes+=("pause")
  [[ $DEV_MODE_BREAKPOINT == true ]] && active_modes+=("breakpoint")
  [[ $DEV_MODE_LOGS == true ]] && active_modes+=("logs")
  [[ $DEV_MODE_DRYRUN == true ]] && active_modes+=("dryrun")

  if [[ ${#active_modes[@]} -gt 0 ]]; then
    if declare -f msg_custom >/dev/null 2>&1; then
      msg_custom "🔧" "${YWB}" "Dev modes active: ${active_modes[*]}"
    else
      echo "[DEV] Active modes: ${active_modes[*]}" >&2
    fi
  fi
}

# ==============================================================================
# SECTION 2: VALIDATION CHECKS
# ==============================================================================

# ------------------------------------------------------------------------------
# shell_check()
#
# Verifies the script runs under bash (scripts rely on bashisms).
# Exits 103 otherwise.
# ------------------------------------------------------------------------------
shell_check() {
  if [[ "$(ps -p $$ -o comm=)" != "bash" ]]; then
    clear
    msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
    echo -e "\nExiting..."
    sleep 2
    exit 103
  fi
}

# ------------------------------------------------------------------------------
# root_check()
#
# Verifies the script runs as root and was NOT invoked via sudo (sudo-launched
# runs cause issues with these scripts). Exits 104 otherwise.
# ------------------------------------------------------------------------------
root_check() {
  if [[ "$(id -u)" -ne 0 || "$(ps -o comm= -p $PPID)" == "sudo" ]]; then
    clear
    msg_error "Please run this script as root."
    echo -e "\nExiting..."
    sleep 2
    exit 104
  fi
}

# ------------------------------------------------------------------------------
# pve_check()
#
# Validates the Proxmox VE version: 8.0–8.9 and 9.0–9.1 are supported.
# Exits 105 for anything else.
# ------------------------------------------------------------------------------
pve_check() {
  local PVE_VER
  PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"

  # PVE 8.x: allow 8.0–8.9 (MINOR is always >= 0, only the upper bound matters)
  if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then
    local MINOR="${BASH_REMATCH[1]}"
    if ((MINOR > 9)); then
      msg_error "This version of Proxmox VE is not supported."
      msg_error "Supported: Proxmox VE version 8.0 – 8.9"
      exit 105
    fi
    return 0
  fi

  # PVE 9.x: allow 9.0–9.1
  if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then
    local MINOR="${BASH_REMATCH[1]}"
    if ((MINOR > 1)); then
      msg_error "This version of Proxmox VE is not yet supported."
      msg_error "Supported: Proxmox VE version 9.0 – 9.1"
      exit 105
    fi
    return 0
  fi

  # Everything else (7.x, 10.x, unparsable) is unsupported.
  msg_error "This version of Proxmox VE is not supported."
  msg_error "Supported versions: Proxmox VE 8.0 – 8.9 or 9.0 – 9.1"
  exit 105
}

# ------------------------------------------------------------------------------
# arch_check()
#
# Requires amd64; ARM users (PiMox) are pointed at the ARM64 fork. Exits 106.
# ------------------------------------------------------------------------------
arch_check() {
  if [[ "$(dpkg --print-architecture)" != "amd64" ]]; then
    msg_error "This script will not work with PiMox (ARM architecture detected)."
    msg_warn "Visit https://github.com/asylumexp/Proxmox for ARM64 support."
    sleep 2
    exit 106
  fi
}

# ------------------------------------------------------------------------------
# ssh_check()
#
# Warns (does not abort) when the script runs over an *external* SSH session;
# localhost, same-IP and same-/24 connections are considered local and skipped.
# Safe under `set -u`: SSH_CLIENT may be unset when not connected via SSH.
# ------------------------------------------------------------------------------
ssh_check() {
  if [[ -n "${SSH_CLIENT:-}" ]]; then
    local client_ip host_ip
    client_ip=$(awk '{print $1}' <<<"$SSH_CLIENT")
    host_ip=$(hostname -I | awk '{print $1}')

    # Local connection (Proxmox WebUI shell or same machine): no warning.
    if [[ "$client_ip" == "127.0.0.1" || "$client_ip" == "::1" || "$client_ip" == "$host_ip" ]]; then
      return
    fi

    # Same /24 subnet: treat as local as well (crude but sufficient here).
    local host_subnet client_subnet
    host_subnet=$(echo "$host_ip" | cut -d. -f1-3)
    client_subnet=$(echo "$client_ip" | cut -d. -f1-3)
    if [[ "$host_subnet" == "$client_subnet" ]]; then
      return
    fi

    # Truly external connection: warn only.
    msg_warn "Running via external SSH (client: $client_ip)."
    msg_warn "For better stability, consider using the Proxmox Shell (Console) instead."
  fi
}

# ==============================================================================
# SECTION 3: EXECUTION HELPERS
# ==============================================================================

# ------------------------------------------------------------------------------
# get_active_logfile()
#
# Prints the log file appropriate for the current execution context.
# Priority: _HOST_LOGFILE (host override, kept un-exported by build.func so
# host-side messages keep landing in BUILD_LOG even after INSTALL_LOG is
# exported for the container) > INSTALL_LOG (container phase) > BUILD_LOG
# (host phase) > timestamped /tmp fallback for legacy scripts.
# ------------------------------------------------------------------------------
get_active_logfile() {
  if [[ -n "${_HOST_LOGFILE:-}" ]]; then
    echo "$_HOST_LOGFILE"
  elif [[ -n "${INSTALL_LOG:-}" ]]; then
    echo "$INSTALL_LOG"
  elif [[ -n "${BUILD_LOG:-}" ]]; then
    echo "$BUILD_LOG"
  else
    echo "/tmp/build-$(date +%Y%m%d_%H%M%S).log"
  fi
}

# Legacy compatibility: SILENT_LOGFILE points to the active log at source time.
SILENT_LOGFILE="$(get_active_logfile)"

# ------------------------------------------------------------------------------
# strip_ansi()
#
# Removes ANSI escape sequences (colors and other CSI codes) from its
# arguments or from stdin when called without arguments. Used to keep log
# files free of terminal control codes. Requires GNU sed (\x1b escape).
# ------------------------------------------------------------------------------
strip_ansi() {
  if [[ $# -gt 0 ]]; then
    echo -e "$*" | sed 's/\x1b\[[0-9;]*m//g; s/\x1b\[[0-9;]*[a-zA-Z]//g'
  else
    sed 's/\x1b\[[0-9;]*m//g; s/\x1b\[[0-9;]*[a-zA-Z]//g'
  fi
}

# ------------------------------------------------------------------------------
# log_msg()
#
# Appends a timestamped, ANSI-stripped message to the active log file,
# creating the parent directory if needed. Silently ignores empty messages.
# ------------------------------------------------------------------------------
log_msg() {
  local msg="$*"
  local logfile
  logfile="$(get_active_logfile)"

  [[ -z "$msg" ]] && return
  [[ -z "$logfile" ]] && return

  mkdir -p "$(dirname "$logfile")" 2>/dev/null || true

  local clean_msg
  clean_msg=$(strip_ansi "$msg")
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $clean_msg" >>"$logfile"
}

# ------------------------------------------------------------------------------
# log_section()
#
# Writes a banner-style section header (with timestamp) to the active log,
# used to separate installation phases. Arguments: $1 - section name.
# ------------------------------------------------------------------------------
log_section() {
  local section="$1"
  local logfile
  logfile="$(get_active_logfile)"

  [[ -z "$logfile" ]] && return
  mkdir -p "$(dirname "$logfile")" 2>/dev/null || true

  {
    echo ""
    echo "================================================================================"
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $section"
    echo "================================================================================"
  } >>"$logfile"
}

# ------------------------------------------------------------------------------
# silent()
#
# Runs a command with stdout/stderr appended to the active log. On failure it
# prints the exit-code explanation plus the last 20 log lines and exits with
# the command's code. Error handling (errexit + ERR trap) is disabled around
# the command so the exit code can be captured, and restored ONLY if it was
# active beforehand — callers that intentionally disabled it stay disabled.
# Honors DEV_MODE_DRYRUN (prints the command instead of running it).
# ------------------------------------------------------------------------------
silent() {
  local cmd="$*"
  local caller_line="${BASH_LINENO[0]:-unknown}"
  local logfile
  logfile="$(get_active_logfile)" # split from `local` so a failure isn't masked

  # Dryrun mode: show command without executing.
  if [[ "${DEV_MODE_DRYRUN:-false}" == "true" ]]; then
    if declare -f msg_custom >/dev/null 2>&1; then
      msg_custom "🔍" "${BL}" "[DRYRUN] $cmd"
    else
      echo "[DRYRUN] $cmd" >&2
    fi
    return 0
  fi

  # Remember whether errexit was active so we only restore what we disabled.
  local _restore_errexit=false
  [[ "$-" == *e* ]] && _restore_errexit=true

  set +Eeuo pipefail
  trap - ERR

  "$@" >>"$logfile" 2>&1
  local rc=$?

  if $_restore_errexit; then
    set -Eeuo pipefail
    trap 'error_handler' ERR
  fi

  if [[ $rc -ne 0 ]]; then
    # Lazily source explain_exit_code() for a human-readable explanation.
    if ! declare -f explain_exit_code >/dev/null 2>&1; then
      if ! source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func); then
        explain_exit_code() { echo "unknown (error_handler.func download failed)"; }
      fi
    fi

    local explanation
    explanation="$(explain_exit_code "$rc")"

    printf "\e[?25h" # re-show cursor (spinner may have hidden it)
    msg_error "in line ${caller_line}: exit code ${rc} (${explanation})"
    msg_custom "→" "${YWB}" "${cmd}"

    if [[ -s "$logfile" ]]; then
      echo -e "\n${TAB}--- Last 20 lines of log ---"
      tail -n 20 "$logfile"
      echo -e "${TAB}-----------------------------------"
      echo -e "${TAB}📋 Full log: ${logfile}\n"
    fi

    exit "$rc"
  fi
}

# ------------------------------------------------------------------------------
# apt_update_safe()
#
# Runs `apt-get update` with output in the active log. On failure it warns
# (with a hint for the common enterprise-repo 401/403 case) but always
# returns 0 so the caller can continue — packages may still be installable.
# ------------------------------------------------------------------------------
apt_update_safe() {
  local logfile
  logfile="$(get_active_logfile)"

  local _restore_errexit=false
  [[ "$-" == *e* ]] && _restore_errexit=true

  set +Eeuo pipefail
  trap - ERR

  apt-get update >>"$logfile" 2>&1
  local rc=$?

  if $_restore_errexit; then
    set -Eeuo pipefail
    trap 'error_handler' ERR
  fi

  if [[ $rc -ne 0 ]]; then
    msg_warn "apt-get update exited with code ${rc} — some repositories may have failed."

    # Common cause: enterprise repo without a subscription.
    if grep -qiE '401\s*Unauthorized|403\s*Forbidden|enterprise\.proxmox\.com' "$logfile" 2>/dev/null; then
      echo -e "${TAB}${INFO} ${YWB}Hint: Proxmox enterprise repository returned an auth error.${CL}"
      echo -e "${TAB}     If you don't have a subscription, you can disable the enterprise"
      echo -e "${TAB}     repo and use the no-subscription repo instead."
    fi

    echo -e "${TAB}${INFO} ${YWB}Continuing despite partial update failure — packages may still be installable.${CL}"
    echo ""
  fi

  return 0
}

# ------------------------------------------------------------------------------
# spinner()
#
# Infinite braille-dot spinner showing SPINNER_MSG; runs in the background
# and is killed by stop_spinner(). Uses the color_spinner() escape bytes.
# ------------------------------------------------------------------------------
spinner() {
  local chars=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
  local msg="${SPINNER_MSG:-Processing...}"
  local i=0
  while true; do
    local index=$((i++ % ${#chars[@]}))
    printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${msg}${CS_CL}"
    sleep 0.1
  done
}

# ------------------------------------------------------------------------------
# clear_line()
#
# Returns the cursor to column 0 and erases to end of line, preferring tput
# with raw ANSI fallbacks when terminfo is unavailable.
# ------------------------------------------------------------------------------
clear_line() {
  tput cr 2>/dev/null || echo -en "\r"
  tput el 2>/dev/null || echo -en "\033[K"
}

# ------------------------------------------------------------------------------
# stop_spinner() — defined below: stops the spinner by PID, restores the
# terminal state and clears SPINNER_PID / SPINNER_MSG.
# ------------------------------------------------------------------------------
if needed +# - Cleans up temp file and resets terminal state +# - Unsets SPINNER_PID and SPINNER_MSG variables +# ------------------------------------------------------------------------------ +stop_spinner() { + local pid="${SPINNER_PID:-}" + [[ -z "$pid" && -f /tmp/.spinner.pid ]] && pid=$(/dev/null; then + sleep 0.05 + kill -9 "$pid" 2>/dev/null || true + wait "$pid" 2>/dev/null || true + fi + rm -f /tmp/.spinner.pid + fi + + unset SPINNER_PID SPINNER_MSG + stty sane 2>/dev/null || true + stty -tostop 2>/dev/null || true +} + +# ============================================================================== +# SECTION 4: MESSAGE OUTPUT +# ============================================================================== + +# ------------------------------------------------------------------------------ +# msg_info() +# +# - Displays informational message with spinner animation +# - Shows each unique message only once (tracked via MSG_INFO_SHOWN) +# - In verbose/Alpine mode: shows hourglass icon instead of spinner +# - Stops any existing spinner before starting new one +# - Backgrounds spinner process and stores PID for later cleanup +# ------------------------------------------------------------------------------ +msg_info() { + local msg="$1" + [[ -z "$msg" ]] && return + + if ! declare -p MSG_INFO_SHOWN &>/dev/null || ! declare -A MSG_INFO_SHOWN &>/dev/null; then + declare -gA MSG_INFO_SHOWN=() + fi + [[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return + MSG_INFO_SHOWN["$msg"]=1 + + # Log to file + log_msg "[INFO] $msg" + + stop_spinner + SPINNER_MSG="$msg" + + if is_verbose_mode || is_alpine; then + local HOURGLASS="${TAB}⏳${TAB}" + printf "\r\e[2K%s %b" "$HOURGLASS" "${YW}${msg}${CL}" >&2 + + # Pause mode: Wait for Enter after each step + if [[ "${DEV_MODE_PAUSE:-false}" == "true" ]]; then + echo -en "\n${YWB}[PAUSE]${CL} Press Enter to continue..." >&2 + read -r + fi + return + fi + + color_spinner + spinner & + SPINNER_PID=$! 
+ echo "$SPINNER_PID" >/tmp/.spinner.pid + disown "$SPINNER_PID" 2>/dev/null || true + + # Pause mode: Stop spinner and wait + if [[ "${DEV_MODE_PAUSE:-false}" == "true" ]]; then + stop_spinner + echo -en "\n${YWB}[PAUSE]${CL} Press Enter to continue..." >&2 + read -r + fi +} + +# ------------------------------------------------------------------------------ +# msg_ok() +# +# - Displays success message with checkmark icon +# - Stops spinner and clears line before output +# - Removes message from MSG_INFO_SHOWN to allow re-display +# - Uses green color for success indication +# ------------------------------------------------------------------------------ +msg_ok() { + local msg="$1" + [[ -z "$msg" ]] && return + stop_spinner + clear_line + echo -e "$CM ${GN}${msg}${CL}" + log_msg "[OK] $msg" + local sanitized_msg + sanitized_msg=$(printf '%s' "$msg" | sed 's/\x1b\[[0-9;]*m//g; s/[^a-zA-Z0-9_]/_/g') + unset 'MSG_INFO_SHOWN['"$sanitized_msg"']' 2>/dev/null || true +} + +# ------------------------------------------------------------------------------ +# msg_error() +# +# - Displays error message with cross/X icon +# - Stops spinner before output +# - Uses red color for error indication +# - Outputs to stderr +# ------------------------------------------------------------------------------ +msg_error() { + stop_spinner + local msg="$1" + echo -e "${BFR:-}${CROSS:-✖️} ${RD}${msg}${CL}" >&2 + log_msg "[ERROR] $msg" +} + +# ------------------------------------------------------------------------------ +# msg_warn() +# +# - Displays warning message with info/lightbulb icon +# - Stops spinner before output +# - Uses bright yellow color for warning indication +# - Outputs to stderr +# ------------------------------------------------------------------------------ +msg_warn() { + stop_spinner + local msg="$1" + echo -e "${BFR:-}${INFO:-ℹ️} ${YWB}${msg}${CL}" >&2 + log_msg "[WARN] $msg" +} + +# ------------------------------------------------------------------------------ +# 
msg_custom() +# +# - Displays custom message with user-defined symbol and color +# - Arguments: symbol, color code, message text +# - Stops spinner before output +# - Useful for specialized status messages +# ------------------------------------------------------------------------------ +msg_custom() { + local symbol="${1:-"[*]"}" + local color="${2:-"\e[36m"}" + local msg="${3:-}" + [[ -z "$msg" ]] && return + stop_spinner + echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}" + log_msg "$msg" +} + +# ------------------------------------------------------------------------------ +# msg_debug() +# +# - Displays debug message with timestamp when var_full_verbose=1 +# - Automatically enables var_verbose if not already set +# - Shows date/time prefix for log correlation +# - Uses bright yellow color for debug output +# ------------------------------------------------------------------------------ +msg_debug() { + if [[ "${var_full_verbose:-0}" == "1" ]]; then + [[ "${var_verbose:-0}" != "1" ]] && var_verbose=1 + echo -e "${YWB}[$(date '+%F %T')] [DEBUG]${CL} $*" + fi +} + +# ------------------------------------------------------------------------------ +# msg_dev() +# +# - Display development mode messages with 🔧 icon +# - Only shown when dev_mode is active +# - Useful for debugging and development-specific output +# - Format: [DEV] message with distinct formatting +# - Usage: msg_dev "Container ready for debugging" +# ------------------------------------------------------------------------------ +msg_dev() { + if [[ -n "${dev_mode:-}" ]]; then + echo -e "${SEARCH}${BOLD}${DGN}🔧 [DEV]${CL} $*" + fi +} +# +# - Displays error message and immediately terminates script +# - Sends SIGINT to current process to trigger error handler +# - Use for unrecoverable errors that require immediate exit +# ------------------------------------------------------------------------------ +fatal() { + msg_error "$1" + kill -INT $$ +} + +# 
# ==============================================================================
# SECTION 5: UTILITY FUNCTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# exit_script()
#
# - Called when user cancels an action
# - Clears screen and displays exit message
# - Exits with default exit code
# ------------------------------------------------------------------------------
exit_script() {
  clear
  msg_error "User exited script"
  exit 0
}

# ------------------------------------------------------------------------------
# get_header()
#
# - Downloads and caches application header ASCII art
# - Falls back to local cache if already downloaded
# - Determines app type (ct/vm) from APP_TYPE variable
# - Returns header content or empty string on failure
# ------------------------------------------------------------------------------
get_header() {
  # Split declaration from substitution so failures aren't masked by `local`
  local app_name
  app_name=$(echo "${APP,,}" | tr -d ' ')
  local app_type=${APP_TYPE:-ct} # Default to 'ct' if not set
  local header_dir="${app_type}"
  [[ "$app_type" == "addon" ]] && header_dir="tools"
  local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${header_dir}/headers/${app_name}"
  local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}"

  mkdir -p "$(dirname "$local_header_path")"

  # Only download when the cache file is missing or empty
  if [ ! -s "$local_header_path" ]; then
    if ! curl -fsSL "$header_url" -o "$local_header_path"; then
      msg_warn "Failed to download header: $header_url"
      return 1
    fi
  fi

  cat "$local_header_path" 2>/dev/null || true
}

# ------------------------------------------------------------------------------
# header_info()
#
# - Displays application header ASCII art at top of screen
# - Clears screen before displaying header
# - Returns silently if header not available
# ------------------------------------------------------------------------------
header_info() {
  local app_name
  app_name=$(echo "${APP,,}" | tr -d ' ')
  local header_content

  header_content=$(get_header "$app_name") || header_content=""

  clear
  # FIX: dropped the unused `term_width` local (computed but never read)
  if [ -n "$header_content" ]; then
    echo "$header_content"
  fi
}

# ------------------------------------------------------------------------------
# ensure_tput()
#
# - Ensures tput command is available for terminal control
# - Installs ncurses-bin on Debian/Ubuntu or ncurses on Alpine
# - Required for clear_line() and terminal width detection
# ------------------------------------------------------------------------------
ensure_tput() {
  if ! command -v tput >/dev/null 2>&1; then
    # 2>/dev/null: /etc/os-release may not exist on minimal systems
    if grep -qi 'alpine' /etc/os-release 2>/dev/null; then
      apk add --no-cache ncurses >/dev/null 2>&1 || msg_warn "Failed to install ncurses (tput may be unavailable)"
    elif command -v apt-get >/dev/null 2>&1; then
      apt-get update -qq >/dev/null
      apt-get install -y -qq ncurses-bin >/dev/null 2>&1 || msg_warn "Failed to install ncurses-bin (tput may be unavailable)"
    fi
  fi
}

# ------------------------------------------------------------------------------
# is_alpine()
#
# - Detects if running on Alpine Linux
# - Checks var_os, PCT_OSTYPE, or /etc/os-release
# - Returns 0 if Alpine, 1 otherwise
# - Used to adjust behavior for Alpine-specific commands
# ------------------------------------------------------------------------------
is_alpine() {
  local os_id="${var_os:-${PCT_OSTYPE:-}}"

  if [[ -z "$os_id" && -f /etc/os-release ]]; then
    os_id="$(
      . /etc/os-release 2>/dev/null
      echo "${ID:-}"
    )"
  fi

  [[ "$os_id" == "alpine" ]]
}

# ------------------------------------------------------------------------------
# is_verbose_mode()
#
# - Determines if script should run in verbose mode
# - Checks VERBOSE and var_verbose variables
# - Used by msg_info() to decide between spinner and static output
# - Note: Non-TTY (pipe) scenarios are handled separately in msg_info()
#   to allow spinner output to pass through pipes (e.g.
# lxc-attach | tee)
# ------------------------------------------------------------------------------
is_verbose_mode() {
  local verbose="${VERBOSE:-${var_verbose:-no}}"
  [[ "$verbose" != "no" ]]
}

# ------------------------------------------------------------------------------
# is_unattended()
#
# - Detects if script is running in unattended/non-interactive mode
# - Checks MODE variable first (primary method)
# - Falls back to legacy flags (PHS_SILENT, var_unattended)
# - Returns 0 (true) if unattended, 1 (false) otherwise
# - Used by prompt functions to auto-apply defaults
#
# Modes that are unattended:
#   - default (1)     : Use script defaults, no prompts
#   - mydefaults (3)  : Use user's default.vars, no prompts
#   - appdefaults (4) : Use app-specific defaults, no prompts
#
# Modes that are interactive:
#   - advanced (2)    : Full wizard with all options
#
# Note: Even in advanced mode, install scripts run unattended because
#       all values are already collected during the wizard phase.
# ------------------------------------------------------------------------------
is_unattended() {
  # Primary: Check MODE variable (case-insensitive)
  local mode="${MODE:-${mode:-}}"
  mode="${mode,,}" # lowercase

  case "$mode" in
  default | 1)
    return 0
    ;;
  mydefaults | userdefaults | 3)
    return 0
    ;;
  appdefaults | 4)
    return 0
    ;;
  advanced | 2)
    # Advanced mode is interactive ONLY during wizard.
    # Inside container (install scripts), it should be unattended.
    # Check if we're inside a container (no pveversion command).
    if ! command -v pveversion &>/dev/null; then
      # We're inside the container - all values already collected
      return 0
    fi
    # On host during wizard - interactive
    return 1
    ;;
  esac

  # Legacy fallbacks for compatibility
  [[ "${PHS_SILENT:-0}" == "1" ]] && return 0
  [[ "${var_unattended:-}" =~ ^(yes|true|1)$ ]] && return 0
  [[ "${UNATTENDED:-}" =~ ^(yes|true|1)$ ]] && return 0

  # No TTY available = unattended
  [[ ! -t 0 ]] && return 0

  # Default: interactive
  return 1
}

# ------------------------------------------------------------------------------
# show_missing_values_warning()
#
# - Displays a summary of required values that used fallback defaults
# - Should be called at the end of install scripts
# - Only shows warning if MISSING_REQUIRED_VALUES array has entries
# - Provides clear guidance on what needs manual configuration
#
# Global:
#   MISSING_REQUIRED_VALUES - Array of variable names that need configuration
# ------------------------------------------------------------------------------
show_missing_values_warning() {
  # FIX: under `set -u` expanding ${#arr[@]} on an *unset* array is fatal in
  # older bash; probe with ${arr+x} first.
  local count=0
  [[ -n "${MISSING_REQUIRED_VALUES+x}" ]] && count=${#MISSING_REQUIRED_VALUES[@]}

  if [[ $count -gt 0 ]]; then
    echo ""
    echo -e "${YW}╔════════════════════════════════════════════════════════════╗${CL}"
    echo -e "${YW}║  ⚠️  MANUAL CONFIGURATION REQUIRED                         ║${CL}"
    echo -e "${YW}╠════════════════════════════════════════════════════════════╣${CL}"
    echo -e "${YW}║ The following values were not provided and need to be      ║${CL}"
    echo -e "${YW}║ configured manually for the service to work properly:      ║${CL}"
    echo -e "${YW}╟────────────────────────────────────────────────────────────╢${CL}"
    local val
    for val in "${MISSING_REQUIRED_VALUES[@]}"; do
      printf "${YW}║${CL} • %-56s ${YW}║${CL}\n" "$val"
    done
    echo -e "${YW}╟────────────────────────────────────────────────────────────╢${CL}"
    echo -e "${YW}║ Check the service configuration files or environment       ║${CL}"
    echo -e "${YW}║ variables and update the placeholder values.               ║${CL}"
    echo -e "${YW}╚════════════════════════════════════════════════════════════╝${CL}"
    echo ""
    return 1
  fi
  return 0
}

# ------------------------------------------------------------------------------
# prompt_confirm()
#
# - Prompts user for yes/no confirmation with timeout and unattended support
# - In unattended mode: immediately returns default value
# - In interactive mode: waits for user input with configurable timeout
# - After timeout: auto-applies default value
#
# Arguments:
#   $1 - Prompt message (required)
#   $2 - Default value: "y" or "n" (optional, default: "n")
#   $3 - Timeout in seconds (optional, default: 60)
#
# Returns:
#   0 - User confirmed (yes)
#   1 - User declined (no) or timeout with default "n"
# ------------------------------------------------------------------------------
prompt_confirm() {
  local message="${1:-Confirm?}"
  local default="${2:-n}"
  local timeout="${3:-60}"
  local response

  # Normalize default to lowercase; anything other than "y" means "n"
  default="${default,,}"
  [[ "$default" != "y" ]] && default="n"

  # Build prompt hint
  local hint
  if [[ "$default" == "y" ]]; then
    hint="[Y/n]"
  else
    hint="[y/N]"
  fi

  # Unattended mode: apply default immediately
  if is_unattended; then
    if [[ "$default" == "y" ]]; then
      return 0
    else
      return 1
    fi
  fi

  # Check if running in a TTY
  if [[ ! -t 0 ]]; then
    # Not a TTY, use default
    if [[ "$default" == "y" ]]; then
      return 0
    else
      return 1
    fi
  fi

  # Interactive prompt with timeout.
  # FIX: prompt goes to stderr for consistency with the sibling prompt_*
  # helpers (they keep stdout clean for captured values).
  echo -en "${YW}${message} ${hint} (auto-${default} in ${timeout}s): ${CL}" >&2

  if read -t "$timeout" -r response; then
    # User provided input
    response="${response,,}" # lowercase
    case "$response" in
    y | yes)
      return 0
      ;;
    n | no)
      return 1
      ;;
    "")
      # Empty response, use default
      if [[ "$default" == "y" ]]; then
        return 0
      else
        return 1
      fi
      ;;
    *)
      # Invalid input, use default
      echo -e "${YW}Invalid response, using default: ${default}${CL}" >&2
      if [[ "$default" == "y" ]]; then
        return 0
      else
        return 1
      fi
      ;;
    esac
  else
    # Timeout occurred
    echo "" >&2 # Newline after timeout
    echo -e "${YW}Timeout - auto-selecting: ${default}${CL}" >&2
    if [[ "$default" == "y" ]]; then
      return 0
    else
      return 1
    fi
  fi
}

# ------------------------------------------------------------------------------
# prompt_input()
#
# - Prompts user for text input with timeout and unattended support
# - In unattended mode: immediately returns default value
# - In interactive mode: waits for user input with configurable timeout
# - After timeout: auto-applies default value
#
# Arguments:
#   $1 - Prompt message (required)
#   $2 - Default value (optional, default: "")
#   $3 - Timeout in seconds (optional, default: 60)
#
# Output:
#   Prints the user input or default value to stdout
# ------------------------------------------------------------------------------
prompt_input() {
  local message="${1:-Enter value:}"
  local default="${2:-}"
  local timeout="${3:-60}"
  local response

  # Build display default hint
  local hint=""
  [[ -n "$default" ]] && hint=" (default: ${default})"

  # Unattended mode: return default immediately
  if is_unattended; then
    echo "$default"
    return 0
  fi

  # Check if running in a TTY
  if [[ ! -t 0 ]]; then
    # Not a TTY, use default
    echo "$default"
    return 0
  fi

  # Interactive prompt with timeout
  echo -en "${YW}${message}${hint} (auto-default in ${timeout}s): ${CL}" >&2

  if read -t "$timeout" -r response; then
    # User provided input (or pressed Enter for empty)
    if [[ -n "$response" ]]; then
      echo "$response"
    else
      echo "$default"
    fi
  else
    # Timeout occurred
    echo "" >&2 # Newline after timeout
    echo -e "${YW}Timeout - using default: ${default}${CL}" >&2
    echo "$default"
  fi
}

# ------------------------------------------------------------------------------
# prompt_input_required()
#
# - Prompts user for REQUIRED text input with fallback support
# - In unattended mode: Uses fallback value if no env var set (with warning)
# - In interactive mode: loops until user provides non-empty input
# - Tracks missing required values for end-of-script summary
# 
# ------------------------------------------------------------------------------
# Global array to track missing required values
declare -g -a MISSING_REQUIRED_VALUES=()

# ------------------------------------------------------------------------------
# prompt_input_required()
#
# Asks for a mandatory value and degrades gracefully:
#   - an env var named by $4, when set, wins outright
#   - unattended / non-TTY runs emit the fallback and record the field in
#     MISSING_REQUIRED_VALUES for the end-of-script summary
#   - interactive runs re-ask up to three times, then fall back
#
# $1 prompt, $2 fallback (default CHANGE_ME), $3 timeout (default 120s),
# $4 env var name hint. Echoes the resolved value to stdout.
# ------------------------------------------------------------------------------
prompt_input_required() {
  local message="${1:-Enter required value:}"
  local fallback="${2:-CHANGE_ME}"
  local timeout="${3:-120}"
  local env_var_hint="${4:-}"
  local answer=""

  # Pre-seeded environment variable takes precedence over any prompting
  if [[ -n "$env_var_hint" && -n "${!env_var_hint:-}" ]]; then
    echo "${!env_var_hint}"
    return 0
  fi

  # Unattended mode: use fallback with warning
  if is_unattended; then
    if [[ -n "$env_var_hint" ]]; then
      echo -e "${YW}⚠ Required value '${env_var_hint}' not set - using fallback: ${fallback}${CL}" >&2
      MISSING_REQUIRED_VALUES+=("$env_var_hint")
    else
      echo -e "${YW}⚠ Required value not provided - using fallback: ${fallback}${CL}" >&2
      MISSING_REQUIRED_VALUES+=("(unnamed)")
    fi
    echo "$fallback"
    return 0
  fi

  # No TTY: same fallback path, different label
  if [[ ! -t 0 ]]; then
    echo -e "${YW}⚠ Not interactive - using fallback: ${fallback}${CL}" >&2
    MISSING_REQUIRED_VALUES+=("${env_var_hint:-unnamed}")
    echo "$fallback"
    return 0
  fi

  # Interactive: keep asking until non-empty, cap at three empty attempts
  local tries=0
  until [[ -n "$answer" ]]; do
    tries=$((tries + 1))

    if [[ $tries -gt 3 ]]; then
      echo -e "${YW}Too many empty inputs - using fallback: ${fallback}${CL}" >&2
      MISSING_REQUIRED_VALUES+=("${env_var_hint:-manual_input}")
      echo "$fallback"
      return 0
    fi

    echo -en "${YW}${message} (required, timeout ${timeout}s): ${CL}" >&2

    if read -t "$timeout" -r answer; then
      [[ -z "$answer" ]] && echo -e "${YW}This field is required. Please enter a value. (attempt ${tries}/3)${CL}" >&2
    else
      # Timeout: emit fallback and record it
      echo "" >&2
      echo -e "${YW}Timeout - using fallback value: ${fallback}${CL}" >&2
      MISSING_REQUIRED_VALUES+=("${env_var_hint:-timeout}")
      echo "$fallback"
      return 0
    fi
  done

  echo "$answer"
}

# ------------------------------------------------------------------------------
# prompt_select()
#
# Numbered menu with timeout and unattended auto-pick.
# $1 prompt, $2 1-based default index, $3 timeout, $4+ option labels.
# Echoes the chosen option's label to stdout; returns 1 when no options given.
# ------------------------------------------------------------------------------
prompt_select() {
  local message="${1:-Select option:}"
  local default="${2:-1}"
  local timeout="${3:-60}"
  shift 3

  local options=("$@")
  local num_options=${#options[@]}

  if [[ $num_options -eq 0 ]]; then
    msg_warn "prompt_select called with no options"
    echo "" >&2
    return 1
  fi

  # Non-numeric or out-of-range default falls back to the first option
  if [[ ! "$default" =~ ^[0-9]+$ ]] || [[ "$default" -lt 1 ]] || [[ "$default" -gt "$num_options" ]]; then
    default=1
  fi

  local pick="${options[$((default - 1))]}"

  # Unattended or no TTY: auto-pick the default
  if is_unattended || [[ ! -t 0 ]]; then
    echo "$pick"
    return 0
  fi

  # Render the menu (to stderr so stdout carries only the result)
  echo -e "${YW}${message}${CL}" >&2
  local idx
  for idx in "${!options[@]}"; do
    local n=$((idx + 1))
    local marker=""
    [[ $n -eq $default ]] && marker=" ${YW}(default)${CL}"
    echo -e "  ${GN}${n})${CL} ${options[$idx]}${marker}" >&2
  done

  echo -en "${YW}Select [1-${num_options}] (auto-select ${default} in ${timeout}s): ${CL}" >&2

  local answer
  if ! read -t "$timeout" -r answer; then
    echo "" >&2 # Newline after timeout
    echo -e "${YW}Timeout - auto-selecting: ${pick}${CL}" >&2
    echo "$pick"
    return 0
  fi

  if [[ -z "$answer" ]]; then
    echo "$pick"
  elif [[ "$answer" =~ ^[0-9]+$ ]] && [[ "$answer" -ge 1 ]] && [[ "$answer" -le "$num_options" ]]; then
    echo "${options[$((answer - 1))]}"
  else
    echo -e "${YW}Invalid selection, using default: ${pick}${CL}" >&2
    echo "$pick"
  fi
}

# ------------------------------------------------------------------------------
# prompt_password()
#
# Hidden-input password prompt with optional auto-generation.
# $1 prompt, $2 default or the literal "generate" (random 16-char secret),
# $3 timeout, $4 minimum length (0 = unchecked).
# Echoes the password to stdout.
# ------------------------------------------------------------------------------
prompt_password() {
  local message="${1:-Enter password:}"
  local default="${2:-}"
  local timeout="${3:-60}"
  local min_length="${4:-0}"
  local answer

  # "generate" requests a random alphanumeric secret (openssl, urandom fallback)
  local generated=""
  if [[ "$default" == "generate" ]]; then
    generated=$(openssl rand -base64 16 2>/dev/null | tr -dc 'a-zA-Z0-9' | head -c 16)
    [[ -z "$generated" ]] && generated=$(head /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 16)
    default="$generated"
  fi

  # Unattended or no TTY: hand back the default/generated value
  if is_unattended || [[ ! -t 0 ]]; then
    echo "$default"
    return 0
  fi

  # Build hint
  local hint=""
  if [[ -n "$generated" ]]; then
    hint=" (Enter for auto-generated)"
  elif [[ -n "$default" ]]; then
    hint=" (Enter for default)"
  fi
  [[ "$min_length" -gt 0 ]] && hint="${hint} [min ${min_length} chars]"

  # Silent read (-s) so the password is not echoed
  echo -en "${YW}${message}${hint} (timeout ${timeout}s): ${CL}" >&2

  if ! read -t "$timeout" -rs answer; then
    echo "" >&2 # Newline after timeout
    echo -e "${YW}Timeout - using generated password${CL}" >&2
    echo "$default"
    return 0
  fi

  echo "" >&2 # Newline after hidden input
  if [[ -z "$answer" ]]; then
    echo "$default"
  elif [[ "$min_length" -gt 0 && ${#answer} -lt "$min_length" ]]; then
    echo -e "${YW}Password too short (min ${min_length}), using default${CL}" >&2
    echo "$default"
  else
    echo "$answer"
  fi
}

# ==============================================================================
# SECTION 6: CLEANUP & MAINTENANCE
# ==============================================================================

# ------------------------------------------------------------------------------
# cleanup_lxc()
#
# - 
# Cleans package manager and language caches (safe for installs AND updates)
# - Supports Alpine (apk), Debian/Ubuntu (apt), Python, Node.js, Go, Rust, Ruby, PHP
# - Uses fallback error handling to prevent cleanup failures from breaking installs
# ------------------------------------------------------------------------------
cleanup_lxc() {
  msg_info "Cleaning up"

  if is_alpine; then
    $STD apk cache clean || true
    rm -rf /var/cache/apk/*
  else
    # FIX: apt-get instead of apt — apt's CLI is not script-stable
    $STD apt-get -y autoremove 2>/dev/null || msg_warn "apt autoremove failed (non-critical)"
    $STD apt-get -y autoclean 2>/dev/null || msg_warn "apt autoclean failed (non-critical)"
    $STD apt-get -y clean 2>/dev/null || msg_warn "apt clean failed (non-critical)"
  fi

  find /tmp /var/tmp -type f -name 'tmp*' -delete 2>/dev/null || true
  find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true

  # Python
  if command -v pip &>/dev/null; then
    rm -rf /root/.cache/pip 2>/dev/null || true
  fi
  if command -v uv &>/dev/null; then
    rm -rf /root/.cache/uv 2>/dev/null || true
  fi

  # Node.js
  if command -v npm &>/dev/null; then
    rm -rf /root/.npm/_cacache /root/.npm/_logs 2>/dev/null || true
  fi
  if command -v yarn &>/dev/null; then
    rm -rf /root/.cache/yarn /root/.yarn/cache 2>/dev/null || true
  fi
  if command -v pnpm &>/dev/null; then
    pnpm store prune &>/dev/null || true
  fi

  # Go (only build cache, not modules)
  if command -v go &>/dev/null; then
    $STD go clean -cache 2>/dev/null || true
  fi

  # Rust (only registry cache, not build artifacts)
  if command -v cargo &>/dev/null; then
    rm -rf /root/.cargo/registry/cache /root/.cargo/.package-cache 2>/dev/null || true
  fi

  # Ruby
  if command -v gem &>/dev/null; then
    rm -rf /root/.gem/cache 2>/dev/null || true
  fi

  # PHP
  if command -v composer &>/dev/null; then
    rm -rf /root/.composer/cache 2>/dev/null || true
  fi

  msg_ok "Cleaned"

  # Send progress ping if available (defined in install.func)
  if declare -f post_progress_to_api &>/dev/null; then
    post_progress_to_api
  fi
}

# ------------------------------------------------------------------------------
# check_or_create_swap()
#
# - Checks if swap is active on system
# - Offers to create swap file if none exists
# - Prompts user for swap size in MB
# - Creates /swapfile with specified size and activates it immediately
# - Returns 0 if swap active or successfully created, 1 if declined/failed
# ------------------------------------------------------------------------------
check_or_create_swap() {
  msg_info "Checking for active swap"

  # FIX: the previous `swapon --show | grep -q 'swap'` only matched when the
  # backing device *path* contained the substring "swap" (e.g. /swapfile);
  # partition-backed swap such as /dev/sda2 was wrongly reported as missing.
  # Any non-empty --noheadings output means at least one swap area is active.
  if [[ -n "$(swapon --noheadings --show 2>/dev/null)" ]]; then
    msg_ok "Swap is active"
    return 0
  fi

  msg_error "No active swap detected"

  if ! prompt_confirm "Do you want to create a swap file?" "n" 60; then
    msg_info "Skipping swap file creation"
    return 1
  fi

  local swap_size_mb
  swap_size_mb=$(prompt_input "Enter swap size in MB (e.g., 2048 for 2GB):" "2048" 60)
  if ! [[ "$swap_size_mb" =~ ^[0-9]+$ ]]; then
    msg_error "Invalid swap size: '${swap_size_mb}' (must be a number in MB)"
    return 1
  fi

  local swap_file="/swapfile"

  msg_info "Creating ${swap_size_mb}MB swap file at $swap_file"
  if ! dd if=/dev/zero of="$swap_file" bs=1M count="$swap_size_mb" status=progress; then
    msg_error "Failed to allocate swap file (dd failed)"
    return 1
  fi
  # 600: swap content must not be world-readable
  if ! chmod 600 "$swap_file"; then
    msg_error "Failed to set permissions on $swap_file"
    return 1
  fi
  if ! mkswap "$swap_file"; then
    msg_error "Failed to format swap file (mkswap failed)"
    return 1
  fi
  if ! swapon "$swap_file"; then
    msg_error "Failed to activate swap (swapon failed)"
    return 1
  fi
  msg_ok "Swap file created and activated successfully"
}

# ------------------------------------------------------------------------------
# Loads LOCAL_IP from persistent store or detects if missing. 
#
# Description:
# - Loads from /run/local-ip.env or performs runtime lookup
# ------------------------------------------------------------------------------

function get_lxc_ip() {
  local ip_env_file="/run/local-ip.env"

  # Reuse a previously persisted address when one exists
  if [[ -f "$ip_env_file" ]]; then
    # shellcheck disable=SC1090
    source "$ip_env_file"
  fi

  if [[ -z "${LOCAL_IP:-}" ]]; then
    # Detection helper: probes IPv4 sources first, then IPv6, in a fixed order.
    # Echoes the first address found and returns 0; returns 1 when nothing works.
    get_current_ip() {
      local addr

      # 1) Direct eth0 lookup (most reliable inside an LXC) — IPv4
      addr=$(ip -4 addr show eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1)
      if [[ -n "$addr" && "$addr" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        echo "$addr"
        return 0
      fi

      # 2) hostname -I (first field is IPv4 when one exists)
      if command -v hostname >/dev/null 2>&1; then
        addr=$(hostname -I 2>/dev/null | awk '{print $1}')
        if [[ -n "$addr" && "$addr" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo "$addr"
          return 0
        fi
      fi

      # 3) Routing table: source address towards well-known IPv4 targets.
      # "default" maps to `ip route get 1`, same as the original two-branch form.
      local dst probe
      for dst in "8.8.8.8" "1.1.1.1" "default"; do
        probe="$dst"
        if [[ "$probe" == "default" ]]; then
          probe="1"
        fi
        addr=$(ip route get "$probe" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
        if [[ -n "$addr" ]]; then
          echo "$addr"
          return 0
        fi
      done

      # 4) IPv6 fallback: direct eth0 lookup (global scope only)
      addr=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1)
      if [[ -n "$addr" && "$addr" =~ : ]]; then
        echo "$addr"
        return 0
      fi

      # 5) IPv6 fallback: first IPv6 field of hostname -I
      if command -v hostname >/dev/null 2>&1; then
        addr=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1)
        if [[ -n "$addr" && "$addr" =~ : ]]; then
          echo "$addr"
          return 0
        fi
      fi

      # 6) IPv6 fallback: routing table towards well-known IPv6 targets
      for dst in "2001:4860:4860::8888" "2606:4700:4700::1111"; do
        addr=$(ip -6 route get "$dst" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
        if [[ -n "$addr" && "$addr" =~ : ]]; then
          echo "$addr"
          return 0
        fi
      done

      return 1
    }

    LOCAL_IP="$(get_current_ip || true)"
    if [[ -z "$LOCAL_IP" ]]; then
      msg_error "Could not determine LOCAL_IP (checked: eth0, hostname -I, ip route, IPv6 targets)"
      return 1
    fi
  fi

  export LOCAL_IP
}

# ==============================================================================
# SIGNAL TRAPS
# ==============================================================================

trap 'stop_spinner' EXIT INT TERM
diff --git a/misc/error_handler.func b/misc/error_handler.func
new file mode 100644
index 0000000..39e5e66
--- /dev/null
+++ b/misc/error_handler.func
#!/usr/bin/env bash
# ------------------------------------------------------------------------------
# ERROR HANDLER - ERROR & SIGNAL MANAGEMENT
# ------------------------------------------------------------------------------
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# ------------------------------------------------------------------------------
#
# Provides comprehensive error handling and signal management for all scripts.
# Includes:
# - Exit code explanations (shell, package managers, databases, custom codes)
# - Error handler with detailed logging
# - Signal handlers (EXIT, INT, TERM)
# - Initialization function for trap setup
#
# Usage:
#   source <(curl -fsSL .../error_handler.func)
#   catch_errors
#
# ------------------------------------------------------------------------------

# ==============================================================================
# SECTION 1: EXIT CODE EXPLANATIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# explain_exit_code()
#
# - Maps a numeric exit code to a one-line human-readable explanation on stdout
# - Canonical version is defined in api.func (sourced before this file);
#   this fallback is only installed when api.func was not loaded
# - Entries are sorted numerically; each code appears exactly once, so the
#   ordering is purely cosmetic
# ------------------------------------------------------------------------------
if ! declare -f explain_exit_code &>/dev/null; then
  explain_exit_code() {
    local code="$1"
    case "$code" in
    # --- Generic shell & curl transfer errors (1-63) ---
    1) echo "General error / Operation not permitted" ;;
    2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
    3) echo "General syntax or argument error" ;;
    4) echo "curl: Feature not supported or protocol error" ;;
    5) echo "curl: Could not resolve proxy" ;;
    6) echo "curl: DNS resolution failed (could not resolve host)" ;;
    7) echo "curl: Failed to connect (network unreachable / host down)" ;;
    8) echo "curl: Server reply error (FTP/SFTP or apk untrusted key)" ;;
    10) echo "Docker / privileged mode required (unsupported environment)" ;;
    16) echo "curl: HTTP/2 framing layer error" ;;
    18) echo "curl: Partial file (transfer not completed)" ;;
    22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
    23) echo "curl: Write error (disk full or permissions)" ;;
    24) echo "curl: Write to local file failed" ;;
    25) echo "curl: Upload failed" ;;
    26) echo "curl: Read error on local file (I/O)" ;;
    27) echo "curl: Out of memory (memory allocation failed)" ;;
    28) echo "curl: Operation timeout (network slow or server not responding)" ;;
    30) echo "curl: FTP port command failed" ;;
    32) echo "curl: FTP SIZE command failed" ;;
    33) echo "curl: HTTP range error" ;;
    34) echo "curl: HTTP post error" ;;
    35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
    36) echo "curl: FTP bad download resume" ;;
    39) echo "curl: LDAP search failed" ;;
    44) echo "curl: Internal error (bad function call order)" ;;
    45) echo "curl: Interface error (failed to bind to specified interface)" ;;
    46) echo "curl: Bad password entered" ;;
    47) echo "curl: Too many redirects" ;;
    48) echo "curl: Unknown command line option specified" ;;
    51) echo "curl: SSL peer certificate or SSH host key verification failed" ;;
    52) echo "curl: Empty reply from server (got nothing)" ;;
    55) echo "curl: Failed sending network data" ;;
    56) echo "curl: Receive error (connection reset by peer)" ;;
    57) echo "curl: Unrecoverable poll/select error (system I/O failure)" ;;
    59) echo "curl: Couldn't use specified SSL cipher" ;;
    61) echo "curl: Bad/unrecognized transfer encoding" ;;
    63) echo "curl: Maximum file size exceeded" ;;
    # --- sysexits.h style codes (64-77) and remaining curl codes ---
    64) echo "Usage error (wrong arguments)" ;;
    65) echo "Data format error (bad input data)" ;;
    66) echo "Input file not found (cannot open input)" ;;
    67) echo "User not found (addressee unknown)" ;;
    68) echo "Host not found (hostname unknown)" ;;
    69) echo "Service unavailable" ;;
    70) echo "Internal software error" ;;
    71) echo "System error (OS-level failure)" ;;
    72) echo "Critical OS file missing" ;;
    73) echo "Cannot create output file" ;;
    74) echo "I/O error" ;;
    75) echo "Temporary failure (retry later)" ;;
    76) echo "Remote protocol error" ;;
    77) echo "Permission denied" ;;
    78) echo "curl: Remote file not found (404 on FTP/file)" ;;
    79) echo "curl: SSH session error (key exchange/auth failed)" ;;
    92) echo "curl: HTTP/2 stream error (protocol violation)" ;;
    95) echo "curl: HTTP/3 layer error" ;;
    # --- APT (100-102) ---
    100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
    101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
    102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
    # --- Script Validation & Setup (103-123) ---
    103) echo "Validation: Shell is not Bash" ;;
    104) echo "Validation: Not running as root (or invoked via sudo)" ;;
    105) echo "Validation: Proxmox VE version not supported" ;;
    106) echo "Validation: Architecture not supported (ARM / PiMox)" ;;
    107) echo "Validation: Kernel key parameters unreadable" ;;
    108) echo "Validation: Kernel key limits exceeded" ;;
    109) echo "Proxmox: No available container ID after max attempts" ;;
    110) echo "Proxmox: Failed to apply default.vars" ;;
    111) echo "Proxmox: App defaults file not available" ;;
    112) echo "Proxmox: Invalid install menu option" ;;
    113) echo "LXC: Under-provisioned — user aborted update" ;;
    114) echo "LXC: Storage too low — user aborted update" ;;
    115) echo "Download: install.func download failed or incomplete" ;;
    116) echo "Proxmox: Default bridge vmbr0 not found" ;;
    117) echo "LXC: Container did not reach running state" ;;
    118) echo "LXC: No IP assigned to container after timeout" ;;
    119) echo "Proxmox: No valid storage for rootdir content" ;;
    120) echo "Proxmox: No valid storage for vztmpl content" ;;
    121) echo "LXC: Container network not ready (no IP after retries)" ;;
    122) echo "LXC: No internet connectivity — user declined to continue" ;;
    123) echo "LXC: Local IP detection failed" ;;
    # --- Exec failures & fatal signals, 128+N (124-146) ---
    124) echo "Command timed out (timeout command)" ;;
    125) echo "Command failed to start (Docker daemon or execution error)" ;;
    126) echo "Command invoked cannot execute (permission problem?)" ;;
    127) echo "Command not found" ;;
    128) echo "Invalid argument to exit" ;;
    129) echo "Killed by SIGHUP (terminal closed / hangup)" ;;
    130) echo "Aborted by user (SIGINT)" ;;
    131) echo "Killed by SIGQUIT (core dumped)" ;;
    132) echo "Killed by SIGILL (illegal CPU instruction)" ;;
    134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
    137) echo "Killed (SIGKILL / Out of memory?)" ;;
    139) echo "Segmentation fault (core dumped)" ;;
    141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;;
    143) echo "Terminated (SIGTERM)" ;;
    144) echo "Killed by signal 16 (SIGUSR1 / SIGSTKFLT)" ;;
    146) echo "Killed by signal 18 (SIGTSTP)" ;;
    # --- Services & build toolchains (150-162) ---
    150) echo "Systemd: Service failed to start" ;;
    151) echo "Systemd: Service unit not found" ;;
    152) echo "Permission denied (EACCES)" ;;
    153) echo "Build/compile failed (make/gcc/cmake)" ;;
    154) echo "Node.js: Native addon build failed (node-gyp)" ;;
    160) echo "Python: Virtualenv / uv environment missing or broken" ;;
    161) echo "Python: Dependency resolution failed" ;;
    162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
    # --- Databases (170-193) ---
    170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
    171) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
    172) echo "PostgreSQL: Database does not exist" ;;
    173) echo "PostgreSQL: Fatal error in query / syntax" ;;
    180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
    181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
    182) echo "MySQL/MariaDB: Database does not exist" ;;
    183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
    190) echo "MongoDB: Connection failed (server not running)" ;;
    191) echo "MongoDB: Authentication failed (bad user/password)" ;;
    192) echo "MongoDB: Database not found" ;;
    193) echo "MongoDB: Fatal query error" ;;
    # --- Proxmox container lifecycle (200-231) ---
    200) echo "Proxmox: Failed to create lock file" ;;
    203) echo "Proxmox: Missing CTID variable" ;;
    204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
    205) echo "Proxmox: Invalid CTID (<100)" ;;
    206) echo "Proxmox: CTID already in use" ;;
    207) echo "Proxmox: Password contains unescaped special characters" ;;
    208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
    209) echo "Proxmox: Container creation failed" ;;
    210) echo "Proxmox: Cluster not quorate" ;;
    211) echo "Proxmox: Timeout waiting for template lock" ;;
    212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
    213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
    214) echo "Proxmox: Not enough storage space" ;;
    215) echo "Proxmox: Container created but not listed (ghost state)" ;;
    216) echo "Proxmox: RootFS entry missing in config" ;;
    217) echo "Proxmox: Storage not accessible" ;;
    218) echo "Proxmox: Template file corrupted or incomplete" ;;
    219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
    220) echo "Proxmox: Unable to resolve template path" ;;
    221) echo "Proxmox: Template file not readable" ;;
    222) echo "Proxmox: Template download failed" ;;
    223) echo "Proxmox: Template not available after download" ;;
    224) echo "Proxmox: PBS storage is for backups only" ;;
    225) echo "Proxmox: No template available for OS/Version" ;;
    231) echo "Proxmox: LXC stack upgrade failed" ;;
    # --- Tools & Addon Scripts (232-238) ---
    232) echo "Tools: Wrong execution environment (run on PVE host, not inside LXC)" ;;
    233) echo "Tools: Application not installed (update prerequisite missing)" ;;
    234) echo "Tools: No LXC containers found or available" ;;
    235) echo "Tools: Backup or restore operation failed" ;;
    236) echo "Tools: Required hardware not detected" ;;
    237) echo "Tools: Dependency package installation failed" ;;
    238) echo "Tools: OS or distribution not supported for this addon" ;;
    # --- Node.js / package manager runtime (239-249) ---
    239) echo "npm/Node.js: Unexpected runtime error or dependency failure" ;;
    243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
    245) echo "Node.js: Invalid command-line option" ;;
    246) echo "Node.js: Internal JavaScript Parse Error" ;;
    247) echo "Node.js: Fatal internal error" ;;
    248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
    249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
    # --- Application Install/Update Errors (250-254) ---
    250) echo "App: Download failed or version not determined" ;;
    251) echo "App: File extraction failed (corrupt or incomplete archive)" ;;
    252) echo "App: Required file or resource not found" ;;
    253) echo "App: Data migration required — update aborted" ;;
    254) echo "App: User declined prompt or input timed out" ;;
    255) echo "DPKG: Fatal internal error" ;;
    *) echo "Unknown error" ;;
    esac
  }
fi

# ==============================================================================
# SECTION 2: ERROR HANDLERS
# ==============================================================================

# ------------------------------------------------------------------------------
# error_handler()
#
# - Main error handler triggered by ERR trap
# - Arguments: exit_code, command, line_number
# - Behavior:
#   * Returns silently if exit_code is 0 (success)
#   * Sources
#     explain_exit_code() for detailed error description
#   * Displays error message with:
#     - Line number where error occurred
#     - Exit code with explanation
#     - Command that failed
#   * Shows last 20 lines of SILENT_LOGFILE if available
#   * Copies log to container /root for later inspection
#   * Exits with original exit code
# ------------------------------------------------------------------------------
error_handler() {
  # $1 (optional): exit code, defaults to $? of the failing command
  # $2 (optional): failing command, defaults to BASH_COMMAND from the ERR trap
  local exit_code=${1:-$?}
  local command=${2:-${BASH_COMMAND:-unknown}}
  local line_number=${BASH_LINENO[0]:-unknown}

  # Strip the literal "$STD" verbosity marker so the reported command is readable
  command="${command//\$STD/}"

  if [[ "$exit_code" -eq 0 ]]; then
    return 0
  fi

  # Stop spinner and restore cursor FIRST — before any output
  # This prevents spinner text overlapping with error messages
  if declare -f stop_spinner >/dev/null 2>&1; then
    stop_spinner 2>/dev/null || true
  fi
  printf "\e[?25h"

  local explanation
  explanation="$(explain_exit_code "$exit_code")"

  # ALWAYS report failure to API immediately - don't wait for container checks
  # This ensures we capture failures that occur before/after container exists
  if declare -f post_update_to_api &>/dev/null; then
    post_update_to_api "failed" "$exit_code" 2>/dev/null || true
  else
    # Container context: post_update_to_api not available (api.func not sourced)
    # Send status directly via curl so container failures are never lost
    _send_abort_telemetry "$exit_code" 2>/dev/null || true
  fi

  # Use msg_error if available, fallback to echo
  if declare -f msg_error >/dev/null 2>&1; then
    msg_error "in line ${line_number}: exit code ${exit_code} (${explanation}): while executing command ${command}"
  else
    echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL} (${explanation}): while executing command ${YWB}${command}${CL}\n"
  fi

  # Optional structured trace for DEBUG_LOGFILE consumers
  if [[ -n "${DEBUG_LOGFILE:-}" ]]; then
    {
      echo "------ ERROR ------"
      echo "Timestamp : $(date '+%Y-%m-%d %H:%M:%S')"
      echo "Exit Code : $exit_code ($explanation)"
      echo "Line : $line_number"
      echo "Command : $command"
      echo "-------------------"
    } >>"$DEBUG_LOGFILE"
  fi

  # Get active log file (BUILD_LOG or INSTALL_LOG)
  local active_log=""
  if declare -f get_active_logfile >/dev/null 2>&1; then
    active_log="$(get_active_logfile)"
  elif [[ -n "${SILENT_LOGFILE:-}" ]]; then
    active_log="$SILENT_LOGFILE"
  fi

  # If active_log points to a container-internal path that doesn't exist on host,
  # fall back to BUILD_LOG (host-side log)
  if [[ -n "$active_log" && ! -s "$active_log" && -n "${BUILD_LOG:-}" && -s "${BUILD_LOG}" ]]; then
    active_log="$BUILD_LOG"
  fi

  # Show last log lines if available
  if [[ -n "$active_log" && -s "$active_log" ]]; then
    echo -e "\n${TAB}--- Last 20 lines of log ---"
    tail -n 20 "$active_log"
    echo -e "${TAB}-----------------------------------\n"
  fi

  # Detect context: Container (INSTALL_LOG set + inside container /root) vs Host
  if [[ -n "${INSTALL_LOG:-}" && -f "${INSTALL_LOG:-}" && -d /root ]]; then
    # CONTAINER CONTEXT: Copy log and create flag file for host
    local container_log="/root/.install-${SESSION_ID:-error}.log"
    cp "${INSTALL_LOG}" "$container_log" 2>/dev/null || true

    # Create error flag file with exit code for host detection
    echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
    # Log path is shown by host as combined log - no need to show container path
  else
    # HOST CONTEXT: Show local log path and offer container cleanup
    if [[ -n "$active_log" && -s "$active_log" ]]; then
      if declare -f msg_custom >/dev/null 2>&1; then
        msg_custom "📋" "${YW}" "Full log: ${active_log}"
      else
        echo -e "${YW}Full log:${CL} ${BL}${active_log}${CL}"
      fi
    fi

    # Offer to remove container if it exists (build errors after container creation)
    if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then
      echo ""
      if declare -f msg_custom >/dev/null 2>&1; then
        echo -en "${TAB}❓${TAB}${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
      else
        echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
      fi

      # Read user response (60s timeout; empty answer counts as "yes")
      local response=""
      if read -t 60 -r response; then
        if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then
          echo ""
          if declare -f msg_info >/dev/null 2>&1; then
            msg_info "Removing container ${CTID}"
          else
            echo -e "${YW}Removing container ${CTID}${CL}"
          fi
          pct stop "$CTID" &>/dev/null || true
          pct destroy "$CTID" &>/dev/null || true
          if declare -f msg_ok >/dev/null 2>&1; then
            msg_ok "Container ${CTID} removed"
          else
            echo -e "${GN}✔${CL} Container ${CTID} removed"
          fi
        elif [[ "$response" =~ ^[Nn]$ ]]; then
          echo ""
          if declare -f msg_warn >/dev/null 2>&1; then
            msg_warn "Container ${CTID} kept for debugging"
          else
            echo -e "${YW}Container ${CTID} kept for debugging${CL}"
          fi
        fi
      else
        # Timeout - auto-remove
        echo ""
        if declare -f msg_info >/dev/null 2>&1; then
          msg_info "No response - removing container ${CTID}"
        else
          echo -e "${YW}No response - removing container ${CTID}${CL}"
        fi
        pct stop "$CTID" &>/dev/null || true
        pct destroy "$CTID" &>/dev/null || true
        if declare -f msg_ok >/dev/null 2>&1; then
          msg_ok "Container ${CTID} removed"
        else
          echo -e "${GN}✔${CL} Container ${CTID} removed"
        fi
      fi

      # Force one final status update attempt after cleanup
      # This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
      if declare -f post_update_to_api &>/dev/null; then
        post_update_to_api "failed" "$exit_code" "force"
      fi
    fi
  fi

  exit "$exit_code"
}

# ==============================================================================
# SECTION 3: TELEMETRY & CLEANUP HELPERS FOR SIGNAL HANDLERS
# ==============================================================================

# ------------------------------------------------------------------------------
#
# _send_abort_telemetry()
#
# - Sends failure/abort status to telemetry API
# - Works in BOTH host context (post_update_to_api available) and
#   container context (only curl available, api.func not sourced)
# - Container context is critical: without this, container-side failures
#   and signal exits are never reported, leaving records stuck in
#   "installing" or "configuring" forever
# - Arguments: $1 = exit_code (defaults to 1)
# - Always returns 0 — telemetry is strictly best-effort and must never
#   change the caller's exit path
# ------------------------------------------------------------------------------
_send_abort_telemetry() {
  local exit_code="${1:-1}"
  # Try full API function first (host context - api.func sourced)
  if declare -f post_update_to_api &>/dev/null; then
    post_update_to_api "failed" "$exit_code" 2>/dev/null || true
    return
  fi
  # Fallback: direct curl (container context - api.func NOT sourced)
  # This is the ONLY way containers can report failures to telemetry
  command -v curl &>/dev/null || return 0
  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
  [[ -z "${RANDOM_UUID:-}" ]] && return 0

  # Collect last 200 log lines for error diagnosis (best-effort)
  # Container context has no get_full_log(), so we gather as much as possible
  local error_text=""
  local logfile=""
  if [[ -n "${INSTALL_LOG:-}" && -s "${INSTALL_LOG}" ]]; then
    logfile="${INSTALL_LOG}"
  elif [[ -n "${SILENT_LOGFILE:-}" && -s "${SILENT_LOGFILE}" ]]; then
    logfile="${SILENT_LOGFILE}"
  fi

  if [[ -n "$logfile" ]]; then
    # Sanitize for embedding in a JSON string: strip ANSI escapes, escape
    # backslashes and quotes, drop CRs, flatten newlines to '|', cap at 16 KiB,
    # and remove remaining control bytes
    error_text=$(tail -n 200 "$logfile" 2>/dev/null | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g; s/\\/\\\\/g; s/"/\\"/g; s/\r//g' | tr '\n' '|' | sed 's/|$//' | head -c 16384 | tr -d '\000-\010\013\014\016-\037\177') || true
  fi

  # Prepend exit code explanation header (like build_error_string does on host)
  local explanation=""
  if declare -f explain_exit_code &>/dev/null; then
    explanation=$(explain_exit_code "$exit_code" 2>/dev/null) || true
  fi
  if [[ -n "$explanation" && -n "$error_text" ]]; then
    error_text="exit_code=${exit_code} | ${explanation}|---|${error_text}"
  elif [[ -n "$explanation" && -z "$error_text" ]]; then
    error_text="exit_code=${exit_code} | ${explanation}"
  fi

  # Calculate duration if start time is available
  local duration=""
  if [[ -n "${DIAGNOSTICS_START_TIME:-}" ]]; then
    duration=$(($(date +%s) - DIAGNOSTICS_START_TIME))
  fi

  # Categorize error if function is available (may not be in minimal container context)
  local error_category=""
  if declare -f categorize_error &>/dev/null; then
    error_category=$(categorize_error "$exit_code" 2>/dev/null) || true
  fi

  # Build JSON payload with error context; optional fields are appended only
  # when non-empty so the JSON stays valid
  local payload
  payload="{\"random_id\":\"${RANDOM_UUID}\",\"execution_id\":\"${EXECUTION_ID:-${RANDOM_UUID}}\",\"type\":\"${TELEMETRY_TYPE:-lxc}\",\"nsapp\":\"${NSAPP:-${app:-unknown}}\",\"status\":\"failed\",\"exit_code\":${exit_code}"
  [[ -n "$error_text" ]] && payload="${payload},\"error\":\"${error_text}\""
  [[ -n "$error_category" ]] && payload="${payload},\"error_category\":\"${error_category}\""
  [[ -n "$duration" ]] && payload="${payload},\"duration\":${duration}"
  payload="${payload}}"

  local api_url="${TELEMETRY_URL:-https://telemetry.community-scripts.org/telemetry}"

  # 2 attempts (retry once on failure) — original had no retry
  local attempt
  for attempt in 1 2; do
    if curl -fsS -m 5 -X POST "$api_url" \
      -H "Content-Type: application/json" \
      -d "$payload" &>/dev/null; then
      return 0
    fi
    [[ $attempt -eq 1 ]] && sleep 1
  done
  return 0
}

# ------------------------------------------------------------------------------
# _stop_container_if_installing()
#
# - Stops the LXC container if we're in the install phase
# - Prevents orphaned container processes when the host exits due to a signal
#   (SSH disconnect, Ctrl+C, SIGTERM) — without this, the container keeps
#   running and may send "configuring" status AFTER the host already sent
#   "failed", leaving records permanently stuck in "configuring"
# - Only acts when:
#   *
#     CONTAINER_INSTALLING flag is set (during lxc-attach in build_container)
#   * CTID is set (container was created)
#   * pct command is available (we're on the Proxmox host, not inside a container)
# - Does NOT destroy the container — just stops it for potential debugging
# ------------------------------------------------------------------------------
_stop_container_if_installing() {
  # Each guard makes this a silent no-op outside its intended context
  [[ "${CONTAINER_INSTALLING:-}" == "true" ]] || return 0
  [[ -n "${CTID:-}" ]] || return 0
  command -v pct &>/dev/null || return 0
  pct stop "$CTID" 2>/dev/null || true
}

# ==============================================================================
# SECTION 4: SIGNAL HANDLERS
# ==============================================================================

# ------------------------------------------------------------------------------
# on_exit()
#
# - EXIT trap handler — runs on EVERY script termination
# - Catches orphaned "installing"/"configuring" records:
#   * If post_to_api sent "installing" but post_update_to_api never ran
#   * Reports final status to prevent records stuck forever
# - Best-effort log collection for failed installs
# - Stops orphaned container processes on failure
# - Cleans up lock files
# - Re-exits with the original exit code so callers see the true status
# ------------------------------------------------------------------------------
on_exit() {
  local exit_code=$?

  # Report orphaned "installing" records to telemetry API
  # Catches ALL exit paths: errors, signals, AND clean exits where
  # post_to_api was called but post_update_to_api was never called
  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    if [[ $exit_code -ne 0 ]]; then
      _send_abort_telemetry "$exit_code"
    elif declare -f post_update_to_api >/dev/null 2>&1; then
      post_update_to_api "done" "0" 2>/dev/null || true
    fi
  fi

  # Best-effort log collection on failure (non-critical, telemetry already sent)
  if [[ $exit_code -ne 0 ]] && declare -f ensure_log_on_host >/dev/null 2>&1; then
    ensure_log_on_host 2>/dev/null || true
  fi

  # Stop orphaned container if we're in the install phase and exiting with error
  if [[ $exit_code -ne 0 ]]; then
    _stop_container_if_installing
  fi

  [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
  exit "$exit_code"
}

# ------------------------------------------------------------------------------
# on_interrupt()
#
# - SIGINT (Ctrl+C) trap handler
# - Reports status FIRST (time-critical: container may be dying)
# - Stops orphaned container to prevent "configuring" ghost records
# - Exits with code 130 (128 + SIGINT=2)
# ------------------------------------------------------------------------------
on_interrupt() {
  # Stop spinner and restore cursor before any output
  if declare -f stop_spinner >/dev/null 2>&1; then
    stop_spinner 2>/dev/null || true
  fi
  printf "\e[?25h" 2>/dev/null || true

  # Telemetry and container stop come before messaging — time-critical
  _send_abort_telemetry "130"
  _stop_container_if_installing
  if declare -f msg_error >/dev/null 2>&1; then
    msg_error "Interrupted by user (SIGINT)" 2>/dev/null || true
  else
    echo -e "\n${RD}Interrupted by user (SIGINT)${CL}" 2>/dev/null || true
  fi
  exit 130
}

# ------------------------------------------------------------------------------
# on_terminate()
#
# - SIGTERM trap handler
# - Reports status FIRST (time-critical: process being killed)
# - Stops orphaned container to prevent "configuring" ghost records
# - Exits with code 143 (128 + SIGTERM=15)
# ------------------------------------------------------------------------------
on_terminate() {
  # Terminal hygiene first: halt any spinner and make the cursor visible again
  if declare -f stop_spinner >/dev/null 2>&1; then
    stop_spinner 2>/dev/null || true
  fi
  printf "\e[?25h" 2>/dev/null || true

  # Telemetry before anything else — the process may be killed at any moment
  _send_abort_telemetry "143"
  _stop_container_if_installing

  if ! declare -f msg_error >/dev/null 2>&1; then
    echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}" 2>/dev/null || true
  else
    msg_error "Terminated by signal (SIGTERM)" 2>/dev/null || true
  fi
  exit 143
}

# ------------------------------------------------------------------------------
# on_hangup()
#
# - SIGHUP trap handler (SSH disconnect, terminal closed)
# - CRITICAL: This was previously MISSING from catch_errors(), causing
#   container processes to become orphans on SSH disconnect — the #1 cause
#   of records stuck in "installing" and "configuring" states
# - Reports status via direct curl (terminal is already closed, no output)
# - Stops orphaned container to prevent ghost records
# - Exits with code 129 (128 + SIGHUP=1)
# ------------------------------------------------------------------------------
on_hangup() {
  # Only stop the spinner — no cursor restore, the terminal is already gone
  if declare -f stop_spinner >/dev/null 2>&1; then
    stop_spinner 2>/dev/null || true
  fi

  _send_abort_telemetry "129"
  _stop_container_if_installing
  exit 129
}

# ==============================================================================
# SECTION 5: INITIALIZATION
# ==============================================================================

# ------------------------------------------------------------------------------
# catch_errors()
#
# - Initializes error handling and signal traps; call early in every script
# - Strict mode:
#   * set -Ee: exit on error, ERR trap inherited by functions/subshells
#   * set -o pipefail: a pipeline fails if any stage fails
#   * set -u: only when STRICT_UNSET=1 (opt-in undefined-variable errors)
# - Trap wiring:
#   * ERR  → error_handler   (script errors)
#   * EXIT → on_exit         (any termination — cleanup + orphan detection)
#   * INT  → on_interrupt    (Ctrl+C)
#   * TERM → on_terminate    (kill / systemd stop)
#   * HUP  → on_hangup       (SSH disconnect / terminal closed)
# ------------------------------------------------------------------------------
catch_errors() {
  set -Ee -o pipefail
  [[ "${STRICT_UNSET:-0}" == "1" ]] && set -u

  trap 'error_handler' ERR
  trap on_exit EXIT
  trap on_interrupt INT
  trap on_terminate TERM
  trap on_hangup HUP
}
diff --git a/misc/install.func b/misc/install.func
new file mode 100644
index 0000000..94f005b
--- /dev/null
+++ b/misc/install.func
# Copyright (c) 2021-2026 community-scripts ORG
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE

# ==============================================================================
# INSTALL.FUNC - CONTAINER INSTALLATION & SETUP
# ==============================================================================
#
# This file provides installation functions executed inside LXC containers
# after creation.
# Handles:
#
# - Network connectivity verification (IPv4/IPv6)
# - OS updates and package installation
# - DNS resolution checks
# - MOTD and SSH configuration
# - Container customization and auto-login
#
# Usage:
# - Sourced by <app>-install.sh scripts
# - Executes via pct exec inside container
# - Requires internet connectivity
#
# ==============================================================================

# ==============================================================================
# SECTION 1: INITIALIZATION
# ==============================================================================

# Bootstrap: curl is required to fetch the shared function libraries below.
if ! command -v curl >/dev/null 2>&1; then
  printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
  apt update >/dev/null 2>&1
  apt install -y curl >/dev/null 2>&1
fi
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func)
load_functions
catch_errors

# Persist diagnostics setting inside container (exported from build.func)
# so addon scripts running later can find the user's choice.
if [[ ! -f /usr/local/community-scripts/diagnostics ]]; then
  mkdir -p /usr/local/community-scripts
  echo "DIAGNOSTICS=${DIAGNOSTICS:-no}" >/usr/local/community-scripts/diagnostics
fi

# Get LXC IP address (must be called INSIDE container, after network is up)
get_lxc_ip

# ------------------------------------------------------------------------------
# post_progress_to_api()
#
# - Lightweight progress ping from inside the container
# - Updates the existing telemetry record status
# - Arguments:
#   * $1: status (optional, default: "configuring")
# - Fire-and-forget: never blocks or fails the script (|| true, 5s timeout)
# - Only executes if DIAGNOSTICS=yes and RANDOM_UUID is set
# ------------------------------------------------------------------------------
post_progress_to_api() {
  command -v curl &>/dev/null || return 0
  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
  [[ -z "${RANDOM_UUID:-}" ]] && return 0

  local progress_status="${1:-configuring}"

  curl -fsS -m 5 -X POST "https://telemetry.community-scripts.org/telemetry" \
    -H "Content-Type: application/json" \
    -d "{\"random_id\":\"${RANDOM_UUID}\",\"execution_id\":\"${EXECUTION_ID:-${RANDOM_UUID}}\",\"type\":\"lxc\",\"nsapp\":\"${app:-unknown}\",\"status\":\"${progress_status}\"}" &>/dev/null || true
}

# ==============================================================================
# SECTION 2: NETWORK & CONNECTIVITY
# ==============================================================================

# ------------------------------------------------------------------------------
# verb_ip6()
#
# - Sets verbose mode via set_std_mode()
# - If IPV6_METHOD=disable: disables IPv6 via sysctl drop-in
# NOTE(review): the snapshot lost the heredoc body of the sysctl file; the
# three disable_ipv6 keys below are reconstructed — confirm against upstream.
# ------------------------------------------------------------------------------
verb_ip6() {
  set_std_mode # Set STD mode based on VERBOSE

  if [ "${IPV6_METHOD:-}" = "disable" ]; then
    msg_info "Disabling IPv6 (this may affect some services)"
    mkdir -p /etc/sysctl.d
    $STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
    $STD sysctl --system || true
    msg_ok "Disabled IPv6"
  fi
}

# ------------------------------------------------------------------------------
# setting_up_container()
#
# - Waits for the container to obtain an IP (RETRY_NUM tries, RETRY_EVERY s)
# - Exits 121 if no network after all retries
# - Removes Python EXTERNALLY-MANAGED marker, disables networkd-wait-online
# NOTE(review): the function header was destroyed in the snapshot; the body
# below is reconstructed from the surviving retry-loop fragment — verify.
# ------------------------------------------------------------------------------
setting_up_container() {
  msg_info "Setting up Container OS"
  for ((i = RETRY_NUM; i > 0; i--)); do
    if [ "$(hostname -I)" != "" ]; then
      break
    fi
    echo 1>&2 -en "${CROSS}${RD} No Network! "
    sleep "$RETRY_EVERY"
  done
  if [ "$(hostname -I)" = "" ]; then
    echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
    echo -e "${NETWORK}Check Network Settings"
    exit 121
  fi
  # Allow pip installs inside the container (PEP 668 marker removal).
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  systemctl disable -q --now systemd-networkd-wait-online.service
  msg_ok "Set up Container OS"
  msg_ok "Network Connected: ${BL}$(hostname -I)"
  post_progress_to_api
}

# ------------------------------------------------------------------------------
# network_check()
#
# - Comprehensive network connectivity check for IPv4 and IPv6
# - Tests connectivity to multiple DNS servers:
#   * IPv4: 1.1.1.1 (Cloudflare), 8.8.8.8 (Google), 9.9.9.9 (Quad9)
#   * IPv6: 2606:4700:4700::1111, 2001:4860:4860::8888, 2620:fe::fe
# - Verifies DNS resolution for GitHub and Community-Scripts domains
# - Prompts user to continue if no internet detected (exit 122 on decline)
# - Uses fatal() on DNS resolution failure for critical hosts
# ------------------------------------------------------------------------------
network_check() {
  # Probes are EXPECTED to fail on v4-only / v6-only hosts, so relax
  # errexit and the ERR trap for the duration of this function.
  set +e
  trap - ERR
  ipv4_connected=false
  ipv6_connected=false
  sleep 1

  # Check IPv4 connectivity to Cloudflare, Google & Quad9 DNS servers.
  if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
    msg_ok "IPv4 Internet Connected"
    ipv4_connected=true
  else
    msg_error "IPv4 Internet Not Connected"
  fi

  # Check IPv6 connectivity to Cloudflare, Google & Quad9 DNS servers.
  if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
    msg_ok "IPv6 Internet Connected"
    ipv6_connected=true
  else
    msg_error "IPv6 Internet Not Connected"
  fi

  # If both IPv4 and IPv6 checks fail, prompt the user.
  if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
    read -r -p "No Internet detected, would you like to continue anyway? " prompt
    if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
      echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
    else
      echo -e "${NETWORK}Check Network Settings"
      exit 122
    fi
  fi

  # DNS resolution checks for GitHub-related domains (IPv4 and/or IPv6).
  GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org")
  GIT_STATUS="Git DNS:"
  DNS_FAILED=false

  for HOST in "${GIT_HOSTS[@]}"; do
    RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1)
    if [[ -z "$RESOLVEDIP" ]]; then
      # Fix: leading space was missing in the failure branch, fusing the
      # first failed host onto the "Git DNS:" label.
      GIT_STATUS+=" $HOST:($DNSFAIL)"
      DNS_FAILED=true
    else
      GIT_STATUS+=" $HOST:($DNSOK)"
    fi
  done

  if [[ "$DNS_FAILED" == true ]]; then
    fatal "$GIT_STATUS"
  else
    msg_ok "$GIT_STATUS"
  fi

  # Restore strict error handling disabled at the top of this function.
  set -e
  trap 'error_handler' ERR
}
# ------------------------------------------------------------------------------
# update_os()
#
# - Updates container OS via apt-get update and dist-upgrade
# - Configures APT cacher proxy if CACHER=yes (accelerates package downloads)
# - Removes Python EXTERNALLY-MANAGED restrictions for pip
# - Sources tools.func for additional setup functions after update (exit 115
#   if the download fails or the file is incomplete)
# - Uses $STD wrapper to suppress output unless VERBOSE=yes
# ------------------------------------------------------------------------------
update_os() {
  msg_info "Updating Container OS"
  if [[ "$CACHER" == "yes" ]]; then
    echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy
    # NOTE(review): heredoc redirection was lost in the snapshot; the
    # unquoted delimiter (expanding ${CACHER_IP} at write time) is
    # reconstructed — confirm against upstream.
    cat <<EOF >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
  echo -n "http://${CACHER_IP}:3142"
else
  echo -n "DIRECT"
fi
EOF
    chmod +x /usr/local/bin/apt-proxy-detect.sh
  fi
  apt_update_safe
  $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  msg_ok "Updated Container OS"
  post_progress_to_api

  # Load tools.func and verify it actually contains the expected functions
  # before trusting it (a truncated download would otherwise fail later).
  local tools_content
  tools_content=$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) || {
    msg_error "Failed to download tools.func"
    exit 115
  }
  source /dev/stdin <<<"$tools_content"
  if ! declare -f fetch_and_deploy_gh_release >/dev/null 2>&1; then
    msg_error "tools.func loaded but incomplete — missing expected functions"
    exit 115
  fi
}

# ==============================================================================
# SECTION 4: MOTD & SSH CONFIGURATION
# ==============================================================================

# ------------------------------------------------------------------------------
# motd_ssh()
#
# - Configures Message of the Day (MOTD) with container information:
#   application name, provider banner, OS name/version, hostname, IP address
# - Disables executable flag on /etc/update-motd.d/* scripts
# - Enables root SSH access if SSH_ROOT=yes
# - Sets TERM=xterm-256color for better terminal support
# ------------------------------------------------------------------------------
motd_ssh() {
  # Set terminal to 256-color mode (idempotent thanks to grep -qxF).
  grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc

  PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
  echo "echo -e \"\"" >"$PROFILE_FILE"
  # Fix: the closing escaped quote was outside the string ("...${CL}"\")
  # in the snapshot, producing a malformed profile line.
  echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE"
  echo "echo \"\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}\$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '\"') - Version: \$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '\"')${CL}\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"

  # Disable default MOTD scripts
  chmod -x /etc/update-motd.d/*

  if [[ "${SSH_ROOT}" == "yes" ]]; then
    sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
    systemctl restart sshd
  fi
  post_progress_to_api
}

# ==============================================================================
# SECTION 5: CONTAINER CUSTOMIZATION
# ==============================================================================

# ------------------------------------------------------------------------------
# customize()
#
# - Configures getty auto-login for root when PASSWORD is empty
# - Creates /usr/bin/update helper that re-runs the ct/<app>.sh script
# - Injects SSH authorized keys if SSH_AUTHORIZED_KEY is set (0700/0600 perms)
# ------------------------------------------------------------------------------
customize() {
  if [[ "$PASSWORD" == "" ]]; then
    msg_info "Customizing Container"
    GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
    mkdir -p "$(dirname "$GETTY_OVERRIDE")"
    # NOTE(review): heredoc redirection was lost in the snapshot; the
    # unquoted delimiter with \$TERM escaped is reconstructed — confirm.
    cat <<EOF >"$GETTY_OVERRIDE"
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
    systemctl daemon-reload
    systemctl restart "$(basename "$(dirname "$GETTY_OVERRIDE")" | sed 's/\.d//')"
    msg_ok "Customized Container"
  fi
  echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update
  chmod +x /usr/bin/update

  if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
    mkdir -p /root/.ssh
    echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/authorized_keys
  fi
  post_progress_to_api
}

# ==============================================================================
# FILE BOUNDARY: misc/tools.func
# (reconstructed from git-diff metadata: new file, 8395 lines)
# ==============================================================================
#!/bin/bash

# ==============================================================================
# HELPER FUNCTIONS FOR PACKAGE MANAGEMENT
# ==============================================================================
#
# This file provides unified helper functions for robust package installation
# and repository management across Debian/Ubuntu OS upgrades.
#
# Key Features:
# - Automatic retry logic for transient APT/network failures
# - Unified keyring cleanup from all 3 locations
# - Legacy installation cleanup (nvm, rbenv, rustup)
# - OS-upgrade-safe repository preparation
# - Service pattern matching for multi-version tools
# - Debug mode for troubleshooting (TOOLS_DEBUG=true)
#
# Usage in install scripts:
#   source /dev/stdin <<< "$FUNCTIONS"   # Load from build.func
#   prepare_repository_setup "mysql"
#   install_packages_with_retry "mysql-server" "mysql-client"
#
# Quick Reference (Core Helpers):
#   cleanup_tool_keyrings()       - Remove keyrings from all 3 locations
#   stop_all_services()           - Stop services by pattern (e.g.
#                                   "php*-fpm")
#   verify_tool_version()         - Validate installed version matches expected
#   cleanup_legacy_install()      - Remove nvm, rbenv, rustup, etc.
#   prepare_repository_setup()    - Cleanup repos + keyrings + validate APT
#   install_packages_with_retry() - Install with 3 retries and APT refresh
#   upgrade_packages_with_retry() - Upgrade with 3 retries and APT refresh
#   curl_with_retry()             - Curl with retry logic and timeouts
#
# Debug Mode:
#   TOOLS_DEBUG=true ./script.sh  - Enable verbose output for troubleshooting
#
# ==============================================================================

# ------------------------------------------------------------------------------
# debug_log()
#
# - Writes a "[DEBUG] ..." line to stderr when debugging is enabled
# - Enabled by any of: TOOLS_DEBUG=true, TOOLS_DEBUG=1, DEBUG=1
# - Silent no-op otherwise, so call sites never need to guard it
# Usage: debug_log "message"
# ------------------------------------------------------------------------------
debug_log() {
  if [[ "${TOOLS_DEBUG:-false}" == "true" || "${TOOLS_DEBUG:-0}" == "1" || "${DEBUG:-0}" == "1" ]]; then
    echo "[DEBUG] $*" >&2
  fi
}

# ------------------------------------------------------------------------------
# Robust curl wrapper with retry logic, timeouts, and error handling
#
# Usage:
#   curl_with_retry "https://example.com/file" "/tmp/output"
#   curl_with_retry "https://api.github.com/..." "-" | jq .
#   CURL_RETRIES=5 curl_with_retry "https://slow.server/file" "/tmp/out"
#
# Parameters:
#   $1 - URL to download
#   $2 - Output file path (use "-" for stdout)
#   $3 - (optional) Additional curl options as string
#
# Variables:
#   CURL_RETRIES    - Number of retries (default: 3)
#   CURL_TIMEOUT    - Max time per attempt in seconds (default: 60)
#   CURL_CONNECT_TO - Connection timeout in seconds (default: 10)
#
# Returns: 0 on success, 1 on failure after all retries
# ------------------------------------------------------------------------------
curl_with_retry() {
  local url="$1"
  local output="${2:--}"
  local extra_opts="${3:-}"
  local retries="${CURL_RETRIES:-3}"
  local timeout="${CURL_TIMEOUT:-60}"
  local connect_timeout="${CURL_CONNECT_TO:-10}"

  local attempt=1
  local success=false
  local backoff=1

  # Extract hostname for DNS pre-check.
  local host
  host=$(echo "$url" | sed -E 's|^https?://([^/:]+).*|\1|')

  # DNS pre-check - fail fast if host is unresolvable.
  if ! getent hosts "$host" &>/dev/null; then
    debug_log "DNS resolution failed for $host"
    return 1
  fi

  while [[ $attempt -le $retries ]]; do
    debug_log "curl attempt $attempt/$retries: $url"

    # Build the command as an array (a flat string would re-split quoted
    # options); extra_opts is word-split on purpose to honor caller flags.
    local -a curl_cmd=(curl -fsSL --connect-timeout "$connect_timeout" --max-time "$timeout")
    # shellcheck disable=SC2206 — intentional word splitting of extra_opts
    [[ -n "$extra_opts" ]] && curl_cmd+=($extra_opts)

    if [[ "$output" == "-" ]]; then
      if "${curl_cmd[@]}" "$url"; then
        success=true
        break
      fi
    else
      if "${curl_cmd[@]}" -o "$output" "$url"; then
        success=true
        break
      fi
    fi

    debug_log "curl attempt $attempt failed (timeout=${timeout}s), waiting ${backoff}s before retry..."
    sleep "$backoff"
    # Exponential backoff: 1, 2, 4, 8... capped at 30s
    backoff=$((backoff * 2))
    ((backoff > 30)) && backoff=30
    # Double --max-time on each retry so slow connections can finish
    timeout=$((timeout * 2))
    ((attempt++))
  done

  if [[ "$success" == "true" ]]; then
    debug_log "curl successful: $url"
    return 0
  else
    debug_log "curl FAILED after $retries attempts: $url"
    return 1
  fi
}

# ------------------------------------------------------------------------------
# Robust curl wrapper for API calls (returns HTTP code + body)
#
# Usage:
#   response=$(curl_api_with_retry "https://api.github.com/repos/owner/repo/releases/latest")
#   http_code=$(curl_api_with_retry "https://api.github.com/..." "/tmp/body.json")
#
# Parameters:
#   $1 - URL to call
#   $2 - (optional) Output file for body (default: stdout)
#   $3 - (optional) Additional curl options as string
#
# Returns: HTTP status code on stdout (after body, if no body file given);
#          exit status 0 on 2xx, 1 otherwise
# ------------------------------------------------------------------------------
curl_api_with_retry() {
  local url="$1"
  local body_file="${2:-}"
  local extra_opts="${3:-}"
  local retries="${CURL_RETRIES:-3}"
  local timeout="${CURL_TIMEOUT:-60}"
  local connect_timeout="${CURL_CONNECT_TO:-10}"

  local attempt=1
  local http_code=""

  while [[ $attempt -le $retries ]]; do
    debug_log "curl API attempt $attempt/$retries: $url"

    # Array form fixes the original string-built command, where the quotes
    # in -w '%{http_code}' were passed literally to curl.
    local -a curl_cmd=(curl -fsSL --connect-timeout "$connect_timeout" --max-time "$timeout" -w '%{http_code}')
    # shellcheck disable=SC2206 — intentional word splitting of extra_opts
    [[ -n "$extra_opts" ]] && curl_cmd+=($extra_opts)

    if [[ -n "$body_file" ]]; then
      http_code=$("${curl_cmd[@]}" -o "$body_file" "$url" 2>/dev/null) || true
    else
      # Capture body and http_code separately.
      local tmp_body="/tmp/curl_api_body_$$"
      http_code=$("${curl_cmd[@]}" -o "$tmp_body" "$url" 2>/dev/null) || true
      if [[ -f "$tmp_body" ]]; then
        cat "$tmp_body"
        rm -f "$tmp_body"
      fi
    fi

    # Success on 2xx codes
    if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
      debug_log "curl API successful: $url (HTTP $http_code)"
      echo "$http_code"
      return 0
    fi

    debug_log "curl API attempt $attempt failed (HTTP $http_code, timeout=${timeout}s), waiting ${attempt}s..."
    sleep "$attempt"
    # Double --max-time on each retry so slow connections can finish
    timeout=$((timeout * 2))
    ((attempt++))
  done

  debug_log "curl API FAILED after $retries attempts: $url"
  echo "$http_code"
  return 1
}

# ------------------------------------------------------------------------------
# Download and install GPG key with retry logic and validation
#
# Usage:
#   download_gpg_key "https://example.com/key.gpg" "/etc/apt/keyrings/example.gpg"
#   download_gpg_key "https://example.com/key.asc" "/etc/apt/keyrings/example.gpg" "dearmor"
#
# Parameters:
#   $1 - URL to GPG key
#   $2 - Output path for keyring file
#   $3 - (optional) "dearmor" to convert ASCII-armored key to binary,
#        "binary" to install as-is, default "auto" (detect format)
#
# Returns: 0 on success, 1 on failure after all retries
# ------------------------------------------------------------------------------
download_gpg_key() {
  local url="$1"
  local output="$2"
  local mode="${3:-auto}" # auto, dearmor, or binary
  local retries="${CURL_RETRIES:-3}"
  local timeout="${CURL_TIMEOUT:-30}"
  local temp_key
  temp_key=$(mktemp)

  mkdir -p "$(dirname "$output")"

  local attempt=1
  while [[ $attempt -le $retries ]]; do
    debug_log "GPG key download attempt $attempt/$retries: $url"

    # Download to temp file first so a partial download never lands in
    # the live keyring path.
    if ! curl -fsSL --connect-timeout 10 --max-time "$timeout" -o "$temp_key" "$url" 2>/dev/null; then
      debug_log "GPG key download attempt $attempt failed, waiting ${attempt}s..."
      sleep "$attempt"
      ((attempt++))
      continue
    fi

    # Auto-detect key format if mode is auto.
    # NOTE(review): file(1) also reports "PGP public key block" for armored
    # keys, so the file-based check can classify an armored key as binary;
    # the explicit "BEGIN PGP" grep is the reliable fallback — confirm order.
    if [[ "$mode" == "auto" ]]; then
      if file "$temp_key" 2>/dev/null | grep -qi "pgp\|gpg\|public key"; then
        mode="binary"
      elif grep -q "BEGIN PGP" "$temp_key" 2>/dev/null; then
        mode="dearmor"
      else
        # Fall back to detection by extension.
        [[ "$url" == *.asc || "$url" == *.txt ]] && mode="dearmor" || mode="binary"
      fi
    fi

    # Process based on mode.
    if [[ "$mode" == "dearmor" ]]; then
      if gpg --dearmor --yes -o "$output" <"$temp_key" 2>/dev/null; then
        rm -f "$temp_key"
        debug_log "GPG key installed (dearmored): $output"
        return 0
      fi
    else
      if mv "$temp_key" "$output" 2>/dev/null; then
        chmod 644 "$output"
        debug_log "GPG key installed: $output"
        return 0
      fi
    fi

    debug_log "GPG key processing attempt $attempt failed"
    sleep "$attempt"
    ((attempt++))
  done

  rm -f "$temp_key"
  debug_log "GPG key download FAILED after $retries attempts: $url"
  return 1
}

# ------------------------------------------------------------------------------
# Cache installed version to avoid repeated checks
# ------------------------------------------------------------------------------
cache_installed_version() {
  local app="$1"
  local version="$2"
  mkdir -p /var/cache/app-versions
  echo "$version" >"/var/cache/app-versions/${app}_version.txt"
}

# Print the cached version for $1 (empty output if none). Always returns 0.
get_cached_version() {
  local app="$1"
  mkdir -p /var/cache/app-versions
  if [[ -f "/var/cache/app-versions/${app}_version.txt" ]]; then
    cat "/var/cache/app-versions/${app}_version.txt"
    return 0
  fi
  return 0
}

# ------------------------------------------------------------------------------
# Clean up ALL keyring locations for a tool (unified helper)
# Usage: cleanup_tool_keyrings "mariadb" "mysql" "postgresql"
# ------------------------------------------------------------------------------
cleanup_tool_keyrings() {
  local tool_patterns=("$@")

  for pattern in "${tool_patterns[@]}"; do
    # shellcheck disable=SC2086 — globbing of ${pattern}* is intentional
    rm -f /usr/share/keyrings/${pattern}*.gpg \
      /etc/apt/keyrings/${pattern}*.gpg \
      /etc/apt/trusted.gpg.d/${pattern}*.gpg 2>/dev/null || true
  done
}

# ------------------------------------------------------------------------------
# Stop and disable all service instances matching a pattern
# Usage: stop_all_services "php*-fpm" "mysql" "mariadb"
# ------------------------------------------------------------------------------
stop_all_services() {
  local service_patterns=("$@")

  for pattern in "${service_patterns[@]}"; do
    # Find all matching services (|| true to handle no matches).
    local services
    services=$(systemctl list-units --type=service --all 2>/dev/null |
      grep -oE "${pattern}[^ ]*\.service" 2>/dev/null | sort -u) || true

    if [[ -n "$services" ]]; then
      while read -r service; do
        $STD systemctl stop "$service" 2>/dev/null || true
        $STD systemctl disable "$service" 2>/dev/null || true
      done <<<"$services"
    fi
  done
}

# ------------------------------------------------------------------------------
# Verify installed tool version matches expected version
# Compares MAJOR version only (everything before the first dot).
# Returns: 0 if match, 1 if mismatch (with warning via msg_warn)
# Usage: verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')"
# ------------------------------------------------------------------------------
verify_tool_version() {
  local tool_name="$1"
  local expected_version="$2"
  local installed_version="$3"

  # Extract major version for comparison.
  local expected_major="${expected_version%%.*}"
  local installed_major="${installed_version%%.*}"

  if [[ "$installed_major" != "$expected_major" ]]; then
    msg_warn "$tool_name version mismatch: expected $expected_version, got $installed_version"
    return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# Clean up legacy installation methods (nvm, rbenv, rustup, etc.)
# Usage: cleanup_legacy_install "nodejs"  -> removes nvm
# ------------------------------------------------------------------------------
cleanup_legacy_install() {
  local tool_name="$1"

  case "$tool_name" in
  nodejs | node)
    if [[ -d "$HOME/.nvm" ]]; then
      msg_info "Removing legacy nvm installation"
      rm -rf "$HOME/.nvm" "$HOME/.npm" "$HOME/.bower" "$HOME/.config/yarn" 2>/dev/null || true
      sed -i '/NVM_DIR/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy nvm installation removed"
    fi
    ;;
  ruby)
    if [[ -d "$HOME/.rbenv" ]]; then
      msg_info "Removing legacy rbenv installation"
      rm -rf "$HOME/.rbenv" 2>/dev/null || true
      sed -i '/rbenv/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy rbenv installation removed"
    fi
    ;;
  rust)
    if [[ -d "$HOME/.cargo" ]] || [[ -d "$HOME/.rustup" ]]; then
      msg_info "Removing legacy rustup installation"
      rm -rf "$HOME/.cargo" "$HOME/.rustup" 2>/dev/null || true
      sed -i '/cargo/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy rustup installation removed"
    fi
    ;;
  go | golang)
    if [[ -d "$HOME/go" ]]; then
      msg_info "Removing legacy Go workspace"
      # Keep user code, just remove GOPATH env.
      sed -i '/GOPATH/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy Go workspace cleaned"
    fi
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Unified repository preparation before setup
# Cleans up old repos, keyrings, and ensures APT is working
# Usage: prepare_repository_setup "mariadb" "mysql"
# Returns: 0 on success, 1 if APT is not in a working state
# ------------------------------------------------------------------------------
prepare_repository_setup() {
  local repo_names=("$@")

  # Clean up all old repository files.
  for repo in "${repo_names[@]}"; do
    cleanup_old_repo_files "$repo"
  done

  # Clean up all keyrings.
  cleanup_tool_keyrings "${repo_names[@]}"

  # Ensure APT is in working state.
  ensure_apt_working || return 1

  return 0
}

# ------------------------------------------------------------------------------
# Install packages with retry logic
# Usage: install_packages_with_retry "mysql-server" "mysql-client"
# Features:
#   - Automatic dpkg recovery on failure
#   - Individual package fallback if batch fails
#   - Dependency resolution with apt --fix-broken install
# Returns: 0 on (possibly partial) success, 1 after all retries fail
# ------------------------------------------------------------------------------
install_packages_with_retry() {
  local packages=("$@")
  local max_retries=3
  local retry=0

  # Pre-check: ensure dpkg is not in a broken state.
  if dpkg --audit 2>&1 | grep -q .; then
    $STD dpkg --configure -a 2>/dev/null || true
  fi

  while [[ $retry -le $max_retries ]]; do
    if DEBIAN_FRONTEND=noninteractive $STD apt install -y \
      -o Dpkg::Options::="--force-confdef" \
      -o Dpkg::Options::="--force-confold" \
      "${packages[@]}" 2>/dev/null; then
      return 0
    fi

    retry=$((retry + 1))
    if [[ $retry -le $max_retries ]]; then
      msg_warn "Package installation failed, retrying ($retry/$max_retries)..."

      # Progressive recovery steps based on retry count.
      case $retry in
      1)
        # First retry: just fix dpkg and update.
        $STD dpkg --configure -a 2>/dev/null || true
        $STD apt update 2>/dev/null || true
        ;;
      2)
        # Second retry: fix broken dependencies.
        $STD apt --fix-broken install -y 2>/dev/null || true
        $STD apt update 2>/dev/null || true
        ;;
      3)
        # Third retry: try installing packages one by one.
        local failed=()
        for pkg in "${packages[@]}"; do
          if ! $STD apt install -y "$pkg" 2>/dev/null; then
            # Try with --fix-missing.
            if ! $STD apt install -y --fix-missing "$pkg" 2>/dev/null; then
              failed+=("$pkg")
            fi
          fi
        done
        # If some packages installed, consider partial success.
        if [[ ${#failed[@]} -lt ${#packages[@]} ]]; then
          if [[ ${#failed[@]} -gt 0 ]]; then
            msg_warn "Partially installed. Failed packages: ${failed[*]}"
          fi
          return 0
        fi
        ;;
      esac

      sleep $((retry * 2))
    fi
  done

  msg_error "Failed to install packages after $((max_retries + 1)) attempts: ${packages[*]}"
  return 1
}

# ------------------------------------------------------------------------------
# Upgrade specific packages with retry logic
# Usage: upgrade_packages_with_retry "mariadb-server" "mariadb-client"
# Returns: 0 on success, 1 after all retries fail
# ------------------------------------------------------------------------------
upgrade_packages_with_retry() {
  local packages=("$@")
  local max_retries=2
  local retry=0

  while [[ $retry -le $max_retries ]]; do
    if DEBIAN_FRONTEND=noninteractive $STD apt install --only-upgrade -y \
      -o Dpkg::Options::="--force-confdef" \
      -o Dpkg::Options::="--force-confold" \
      "${packages[@]}" 2>/dev/null; then
      return 0
    fi

    retry=$((retry + 1))
    if [[ $retry -le $max_retries ]]; then
      msg_warn "Package upgrade failed, retrying ($retry/$max_retries)..."
      sleep 2
      # Fix any interrupted dpkg operations before retry.
      $STD dpkg --configure -a 2>/dev/null || true
      $STD apt update 2>/dev/null || true
    fi
  done

  msg_error "Failed to upgrade packages after $((max_retries + 1)) attempts: ${packages[*]}"
  return 1
}

# ------------------------------------------------------------------------------
# Check if tool is already installed and optionally verify exact version
# Prints the installed version (when detectable) on stdout.
# Returns: 0 if installed (and version matches, when specified), 1 otherwise
# Usage: is_tool_installed "mariadb" "11.4" || echo "Not installed"
# ------------------------------------------------------------------------------
is_tool_installed() {
  local tool_name="$1"
  local required_version="${2:-}"
  local installed_version=""

  case "$tool_name" in
  mariadb)
    if command -v mariadb >/dev/null 2>&1; then
      installed_version=$(mariadb --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    fi
    ;;
  mysql)
    if command -v mysql >/dev/null 2>&1; then
      installed_version=$(mysql --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    fi
    ;;
  mongodb | mongod)
    if command -v mongod >/dev/null 2>&1; then
      installed_version=$(mongod --version 2>/dev/null | awk '/db version/{print $3}' | cut -d. -f1,2)
    fi
    ;;
  node | nodejs)
    if command -v node >/dev/null 2>&1; then
      installed_version=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+')
    fi
    ;;
  php)
    if command -v php >/dev/null 2>&1; then
      installed_version=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
    fi
    ;;
  postgres | postgresql)
    if command -v psql >/dev/null 2>&1; then
      installed_version=$(psql --version 2>/dev/null | awk '{print $3}' | cut -d. -f1)
    fi
    ;;
  ruby)
    if command -v ruby >/dev/null 2>&1; then
      installed_version=$(ruby --version 2>/dev/null | awk '{print $2}' | cut -d. -f1,2)
    fi
    ;;
  rust | rustc)
    if command -v rustc >/dev/null 2>&1; then
      installed_version=$(rustc --version 2>/dev/null | awk '{print $2}')
    fi
    ;;
  go | golang)
    if command -v go >/dev/null 2>&1; then
      installed_version=$(go version 2>/dev/null | awk '{print $3}' | sed 's/go//')
    fi
    ;;
  clickhouse)
    if command -v clickhouse >/dev/null 2>&1; then
      installed_version=$(clickhouse --version 2>/dev/null | awk '{print $2}')
    fi
    ;;
  esac

  if [[ -z "$installed_version" ]]; then
    return 1 # Not installed
  fi

  if [[ -n "$required_version" && "$installed_version" != "$required_version" ]]; then
    echo "$installed_version"
    return 1 # Version mismatch
  fi

  echo "$installed_version"
  return 0 # Installed and version matches (if specified)
}

# ------------------------------------------------------------------------------
# Remove old tool version completely (purge + cleanup repos)
# Usage: remove_old_tool_version "mariadb" "repository-name"
# ------------------------------------------------------------------------------
remove_old_tool_version() {
  local tool_name="$1"
  local repo_name="${2:-$tool_name}"

  case "$tool_name" in
  mariadb)
    stop_all_services "mariadb"
    $STD apt purge -y 'mariadb*' >/dev/null 2>&1 || true
    cleanup_tool_keyrings "mariadb"
    ;;
  mysql)
    stop_all_services "mysql"
    $STD apt purge -y 'mysql*' >/dev/null 2>&1 || true
    rm -rf /var/lib/mysql 2>/dev/null || true
    cleanup_tool_keyrings "mysql"
    ;;
  mongodb)
    stop_all_services "mongod"
    $STD apt purge -y 'mongodb*' >/dev/null 2>&1 || true
    rm -rf /var/lib/mongodb 2>/dev/null || true
    cleanup_tool_keyrings "mongodb"
    ;;
  node | nodejs)
    $STD apt purge -y nodejs npm >/dev/null 2>&1 || true
    # Clean up npm global modules.
    if command -v npm >/dev/null 2>&1; then
      npm list -g 2>/dev/null | grep -oE '^ \S+' | awk '{print $1}' 2>/dev/null | while read -r module; do
        npm uninstall -g "$module" >/dev/null 2>&1 || true
      done || true
    fi
    cleanup_legacy_install "nodejs"
    cleanup_tool_keyrings "nodesource"
    ;;
  php)
    stop_all_services "php.*-fpm"
    $STD apt purge -y 'php*' >/dev/null 2>&1 || true
    rm -rf /etc/php 2>/dev/null || true
    cleanup_tool_keyrings "deb.sury.org-php" "php"
    ;;
  postgresql)
    stop_all_services "postgresql"
    $STD apt purge -y 'postgresql*' >/dev/null 2>&1 || true
    # Keep data directory for safety (can be removed manually if needed).
    # rm -rf /var/lib/postgresql 2>/dev/null || true
    cleanup_tool_keyrings "postgresql" "pgdg"
    ;;
  java)
    $STD apt purge -y 'temurin*' 'adoptium*' 'openjdk*' >/dev/null 2>&1 || true
    cleanup_tool_keyrings "adoptium"
    ;;
  ruby)
    cleanup_legacy_install "ruby"
    $STD apt purge -y 'ruby*' >/dev/null 2>&1 || true
    ;;
  rust)
    cleanup_legacy_install "rust"
    ;;
  go | golang)
    rm -rf /usr/local/go 2>/dev/null || true
    cleanup_legacy_install "golang"
    ;;
  clickhouse)
    stop_all_services "clickhouse-server"
    $STD apt purge -y 'clickhouse*' >/dev/null 2>&1 || true
    rm -rf /var/lib/clickhouse 2>/dev/null || true
    cleanup_tool_keyrings "clickhouse"
    ;;
  esac

  # Clean up old repository files (both .list and .sources).
  cleanup_old_repo_files "$repo_name"

  return 0
}

# ------------------------------------------------------------------------------
# Determine if tool update/upgrade is needed
# Returns: 0 (update needed), 1 (already up-to-date)
# Usage: if should_update_tool "mariadb" "11.4"; then ... fi
# ------------------------------------------------------------------------------
should_update_tool() {
  local tool_name="$1"
  local target_version="$2"
  local current_version=""

  # Get currently installed version; not installed means install is needed.
  current_version=$(is_tool_installed "$tool_name" 2>/dev/null) || return 0

  # If versions are identical, no update needed.
  if [[ "$current_version" == "$target_version" ]]; then
    return 1 # No update needed
  fi

  return 0 # Update needed
}

# ------------------------------------------------------------------------------
# Unified repository management for tools
# Handles adding, updating, and verifying tool repositories
# Usage: manage_tool_repository "mariadb" "11.4" "https://repo..."
"GPG_key_url"
# Supports: mariadb, mongodb, nodejs, postgresql, php, mysql
#
# Arguments:
#   $1 - tool name (selects the case branch below)
#   $2 - version/series string (used in repo paths and keyring filenames)
#   $3 - base repository URL (required: mariadb, mongodb, nodejs)
#   $4 - GPG key URL (required by every supported tool)
# Returns: 0 on success, 1 on validation/download failure or unknown tool.
# ------------------------------------------------------------------------------
manage_tool_repository() {
  local tool_name="$1"
  local version="$2"
  local repo_url="$3"
  local gpg_key_url="${4:-}"
  local distro_id repo_component suite

  distro_id=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')

  case "$tool_name" in
  mariadb)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "MariaDB repository requires repo_url and gpg_key_url"
      return 1
    fi

    # Clean old repos first
    cleanup_old_repo_files "mariadb"

    # Get suite for fallback handling (handles releases upstream doesn't ship yet)
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url/$distro_id")

    # Setup new repository using deb822 format
    setup_deb822_repo \
      "mariadb" \
      "$gpg_key_url" \
      "$repo_url/$distro_id" \
      "$suite" \
      "main"
    return 0
    ;;

  mongodb)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "MongoDB repository requires repo_url and gpg_key_url"
      return 1
    fi

    # Clean old repos first
    cleanup_old_repo_files "mongodb"

    # Import GPG key with retry logic
    if ! download_gpg_key "$gpg_key_url" "/etc/apt/keyrings/mongodb-server-${version}.gpg" "dearmor"; then
      msg_error "Failed to download MongoDB GPG key"
      return 1
    fi
    chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"

    # Setup repository
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

    # Suite mapping with fallback for newer releases not yet supported by upstream
    if [[ "$distro_id" == "debian" ]]; then
      case "$distro_codename" in
      trixie | forky | sid)
        # Testing/unstable releases fallback to latest stable suite
        suite="bookworm"
        ;;
      bookworm)
        suite="bookworm"
        ;;
      bullseye)
        suite="bullseye"
        ;;
      *)
        # Unknown release: fallback to latest stable suite
        msg_warn "Unknown Debian release '${distro_codename}', using bookworm"
        suite="bookworm"
        ;;
      esac
    elif [[ "$distro_id" == "ubuntu" ]]; then
      case "$distro_codename" in
      oracular | plucky)
        # Newer releases fallback to latest LTS
        suite="noble"
        ;;
      noble)
        suite="noble"
        ;;
      jammy)
        suite="jammy"
        ;;
      focal)
        suite="focal"
        ;;
      *)
        # Unknown release: fallback to latest LTS
        msg_warn "Unknown Ubuntu release '${distro_codename}', using noble"
        suite="noble"
        ;;
      esac
    else
      # For other distros, try generic fallback
      suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url")
    fi

    repo_component="main"
    [[ "$distro_id" == "ubuntu" ]] && repo_component="multiverse"

    # Write deb822 .sources file (heredoc intentionally unquoted so vars expand)
    cat <<EOF >/etc/apt/sources.list.d/mongodb-org-${version}.sources
Types: deb
URIs: ${repo_url}
Suites: ${suite}/mongodb-org/${version}
Components: ${repo_component}
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/mongodb-server-${version}.gpg
EOF
    return 0
    ;;

  nodejs)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "Node.js repository requires repo_url and gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "nodesource"

    # NodeSource uses deb822 format with GPG from repo
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

    # Download GPG key from NodeSource with retry logic
    if ! download_gpg_key "$gpg_key_url" "/etc/apt/keyrings/nodesource.gpg" "dearmor"; then
      msg_error "Failed to import NodeSource GPG key"
      return 1
    fi

    # NodeSource repos use the fixed "nodistro" suite regardless of codename
    cat <<EOF >/etc/apt/sources.list.d/nodesource.sources
Types: deb
URIs: $repo_url
Suites: nodistro
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/nodesource.gpg
EOF
    return 0
    ;;

  php)
    if [[ -z "$gpg_key_url" ]]; then
      msg_error "PHP repository requires gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "php"

    # Download and install keyring with retry logic
    if ! curl_with_retry "$gpg_key_url" "/tmp/debsuryorg-archive-keyring.deb"; then
      msg_error "Failed to download PHP keyring"
      return 1
    fi
    # Don't use /dev/null redirection for dpkg as it may use background processes
    dpkg -i /tmp/debsuryorg-archive-keyring.deb >>"$(get_active_logfile)" 2>&1 || {
      msg_error "Failed to install PHP keyring"
      rm -f /tmp/debsuryorg-archive-keyring.deb
      return 1
    }
    rm -f /tmp/debsuryorg-archive-keyring.deb

    # Setup repository (keyring path comes from the sury keyring package above)
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    cat <<EOF >/etc/apt/sources.list.d/php.sources
Types: deb
URIs: https://packages.sury.org/php
Suites: $distro_codename
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /usr/share/keyrings/deb.sury.org-php.gpg
EOF
    return 0
    ;;

  postgresql)
    if [[ -z "$gpg_key_url" ]]; then
      msg_error "PostgreSQL repository requires gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "postgresql"

    # Import PostgreSQL key with retry logic
    if ! download_gpg_key "$gpg_key_url" "/etc/apt/keyrings/postgresql.gpg" "dearmor"; then
      msg_error "Failed to import PostgreSQL GPG key"
      return 1
    fi

    # Setup repository (PGDG suites are "<codename>-pgdg")
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    cat <<EOF >/etc/apt/sources.list.d/postgresql.sources
Types: deb
URIs: http://apt.postgresql.org/pub/repos/apt
Suites: $distro_codename-pgdg
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/postgresql.gpg
EOF
    return 0
    ;;

  *)
    msg_error "Unknown tool repository: $tool_name"
    return 1
    ;;
  esac

  return 0
}

# ------------------------------------------------------------------------------
# Unified package upgrade function (with apt update caching)
# Upgrades a single already-installed package; refreshes the package lists at
# most once per 5 minutes (shared timestamp with ensure_dependencies).
# Returns: 1 if the upgrade itself fails; a failed `apt update` only warns.
# ------------------------------------------------------------------------------
upgrade_package() {
  local package="$1"

  # Use same caching logic as ensure_dependencies
  local apt_cache_file="/var/cache/apt-update-timestamp"
  local current_time
  current_time=$(date +%s)
  local last_update=0

  if [[ -f "$apt_cache_file" ]]; then
    last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
  fi

  if ((current_time - last_update > 300)); then
    $STD apt update || {
      msg_warn "APT update failed in upgrade_package - continuing with cached packages"
    }
    echo "$current_time" >"$apt_cache_file"
  fi

  $STD apt install --only-upgrade -y "$package" || {
    msg_warn "Failed to upgrade $package"
    return 1
  }
}

# ------------------------------------------------------------------------------
# Repository availability check with caching
# ------------------------------------------------------------------------------
# Note: Must use -gA (global) because tools.func is sourced inside update_os()
# function scope. 
Plain 'declare -A' would create a local variable that gets +# destroyed when update_os() returns, causing "unbound variable" errors later +# when setup_postgresql/verify_repo_available tries to access the cache key. +declare -gA _REPO_CACHE 2>/dev/null || declare -A _REPO_CACHE 2>/dev/null || true + +verify_repo_available() { + local repo_url="$1" + local suite="$2" + local cache_key="${repo_url}|${suite}" + local cache_ttl=300 # 5 minutes + + # Check cache first (avoid repeated HTTP requests) + if [[ -n "${_REPO_CACHE[$cache_key]:-}" ]]; then + local cached_time cached_result + cached_time=$(echo "${_REPO_CACHE[$cache_key]}" | cut -d'|' -f1) + cached_result=$(echo "${_REPO_CACHE[$cache_key]}" | cut -d'|' -f2) + if (($(date +%s) - cached_time < cache_ttl)); then + [[ "$cached_result" == "1" ]] && return 0 || return 1 + fi + fi + + # Perform actual check with short timeout + local result=1 + if curl -fsSL --max-time 5 --connect-timeout 3 "${repo_url}/dists/${suite}/Release" &>/dev/null; then + result=0 + fi + + # Cache the result + _REPO_CACHE[$cache_key]="$(date +%s)|$result" + + return $result +} + +# ------------------------------------------------------------------------------ +# Ensure dependencies are installed (with apt/apk update caching) +# Supports both Debian (apt/dpkg) and Alpine (apk) systems +# ------------------------------------------------------------------------------ +ensure_dependencies() { + local deps=("$@") + local missing=() + + # Detect Alpine Linux + if [[ -f /etc/alpine-release ]]; then + for dep in "${deps[@]}"; do + if command -v "$dep" &>/dev/null; then + continue + fi + if apk info -e "$dep" &>/dev/null; then + continue + fi + missing+=("$dep") + done + + if [[ ${#missing[@]} -gt 0 ]]; then + $STD apk add --no-cache "${missing[@]}" || { + local failed=() + for pkg in "${missing[@]}"; do + if ! 
$STD apk add --no-cache "$pkg" 2>/dev/null; then + failed+=("$pkg") + fi + done + if [[ ${#failed[@]} -gt 0 ]]; then + msg_error "Failed to install dependencies: ${failed[*]}" + return 1 + fi + } + fi + return 0 + fi + + # Debian/Ubuntu: Fast batch check using dpkg-query + local installed_pkgs + installed_pkgs=$(dpkg-query -W -f='${Package}\n' 2>/dev/null | sort -u) + + for dep in "${deps[@]}"; do + # First check if command exists (for binaries like jq, curl) + if command -v "$dep" &>/dev/null; then + continue + fi + # Then check if package is installed + if echo "$installed_pkgs" | grep -qx "$dep"; then + continue + fi + missing+=("$dep") + done + + if [[ ${#missing[@]} -gt 0 ]]; then + # Only run apt update if not done recently (within last 5 minutes) + local apt_cache_file="/var/cache/apt-update-timestamp" + local current_time + current_time=$(date +%s) + local last_update=0 + + if [[ -f "$apt_cache_file" ]]; then + last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0) + fi + + if ((current_time - last_update > 300)); then + # Ensure orphaned sources are cleaned before updating + cleanup_orphaned_sources 2>/dev/null || true + + if ! $STD apt update; then + ensure_apt_working || return 1 + fi + echo "$current_time" >"$apt_cache_file" + fi + + $STD apt install -y "${missing[@]}" || { + # Fallback: try installing one by one to identify problematic package + local failed=() + for pkg in "${missing[@]}"; do + if ! 
$STD apt install -y "$pkg" 2>/dev/null; then + failed+=("$pkg") + fi + done + if [[ ${#failed[@]} -gt 0 ]]; then + msg_error "Failed to install dependencies: ${failed[*]}" + return 1 + fi + } + fi +} + +# ------------------------------------------------------------------------------ +# Smart version comparison +# ------------------------------------------------------------------------------ +version_gt() { + test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1" +} + +# ------------------------------------------------------------------------------ +# Get system architecture (normalized) +# ------------------------------------------------------------------------------ +get_system_arch() { + local arch_type="${1:-dpkg}" # dpkg, uname, or both + local arch + + case "$arch_type" in + dpkg) + arch=$(dpkg --print-architecture 2>/dev/null) + ;; + uname) + arch=$(uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + ;; + both | *) + arch=$(dpkg --print-architecture 2>/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + ;; + esac + + echo "$arch" +} + +# ------------------------------------------------------------------------------ +# Create temporary directory with automatic cleanup +# ------------------------------------------------------------------------------ +create_temp_dir() { + local tmp_dir=$(mktemp -d) + # Set trap to cleanup on EXIT, ERR, INT, TERM + trap "rm -rf '$tmp_dir'" EXIT ERR INT TERM + echo "$tmp_dir" +} + +# ------------------------------------------------------------------------------ +# Check if package is installed (supports both Debian and Alpine) +# ------------------------------------------------------------------------------ +is_package_installed() { + local package="$1" + if [[ -f /etc/alpine-release ]]; then + apk info -e "$package" &>/dev/null + else + dpkg-query -W -f='${Status}' "$package" 2>/dev/null | grep -q "^install ok 
installed$"
  fi
}

# ------------------------------------------------------------------------------
# Prompt user to enter a GitHub Personal Access Token (PAT) interactively
# Returns 0 if a valid token was provided, 1 otherwise
# Exports GITHUB_TOKEN on success. Refuses to prompt when stdin is not a TTY.
# ------------------------------------------------------------------------------
prompt_for_github_token() {
  # Non-interactive session: never block waiting for input
  if [[ ! -t 0 ]]; then
    return 1
  fi

  local reply
  read -rp "${TAB}Would you like to enter a GitHub Personal Access Token (PAT)? [y/N]: " reply
  reply="${reply:-n}"

  if [[ ! "${reply,,}" =~ ^(y|yes)$ ]]; then
    return 1
  fi

  local token
  while true; do
    read -rp "${TAB}Enter your GitHub PAT: " token
    # Trim leading/trailing whitespace
    token="$(echo "$token" | xargs)"
    if [[ -z "$token" ]]; then
      msg_warn "Token cannot be empty. Please try again."
      continue
    fi
    if [[ "$token" =~ [[:space:]] ]]; then
      msg_warn "Token must not contain spaces. Please try again."
      continue
    fi
    break
  done

  export GITHUB_TOKEN="$token"
  msg_ok "GitHub token has been set."
  return 0
}

# ------------------------------------------------------------------------------
# GitHub API call with authentication and rate limit handling
# $1 = API URL, $2 = output file (default /dev/stdout). Up to 3 attempts with
# exponential backoff on 403/no-response; on 401/403 may interactively prompt
# for a PAT and retry. Returns 0 only on HTTP 200.
# ------------------------------------------------------------------------------
github_api_call() {
  local url="$1"
  local output_file="${2:-/dev/stdout}"
  local max_retries=3
  local retry_delay=2

  local header_args=()
  [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN")

  local attempt=1
  while ((attempt <= max_retries)); do
    local http_code
    # curl writes the body to $output_file and prints only the status code
    http_code=$(curl -sSL -w "%{http_code}" -o "$output_file" \
      -H "Accept: application/vnd.github+json" \
      -H "X-GitHub-Api-Version: 2022-11-28" \
      "${header_args[@]}" \
      "$url" 2>/dev/null) || true

    case "$http_code" in
    200)
      return 0
      ;;
    401)
      msg_error "GitHub API authentication failed (HTTP 401)."
      if [[ -n "${GITHUB_TOKEN:-}" ]]; then
        msg_error "Your GITHUB_TOKEN appears to be invalid or expired."
      else
        msg_error "The repository may require authentication."
      fi
      # NOTE(review): retries with the new token without resetting attempt, so
      # the user may be re-prompted on the same attempt if the token is bad.
      if prompt_for_github_token; then
        header_args=(-H "Authorization: Bearer $GITHUB_TOKEN")
        continue
      fi
      return 1
      ;;
    403)
      # Rate limit - check if we can retry
      if [[ $attempt -lt $max_retries ]]; then
        msg_warn "GitHub API rate limit, waiting ${retry_delay}s... (attempt $attempt/$max_retries)"
        sleep "$retry_delay"
        retry_delay=$((retry_delay * 2))
        ((attempt++))
        continue
      fi
      msg_error "GitHub API rate limit exceeded (HTTP 403)."
      # A fresh token gets a full new round of attempts
      if prompt_for_github_token; then
        header_args=(-H "Authorization: Bearer $GITHUB_TOKEN")
        retry_delay=2
        attempt=1
        continue
      fi
      msg_error "To increase the limit, export a GitHub token before running the script:"
      msg_error " export GITHUB_TOKEN=\"ghp_your_token_here\""
      return 1
      ;;
    404)
      msg_error "GitHub repository or release not found (HTTP 404): $url"
      return 1
      ;;
    000 | "")
      # No HTTP response at all (DNS/connect failure)
      if [[ $attempt -lt $max_retries ]]; then
        sleep "$retry_delay"
        ((attempt++))
        continue
      fi
      msg_error "GitHub API connection failed (no response)."
      msg_error "Check your network/DNS: curl -sSL https://api.github.com/rate_limit"
      return 1
      ;;
    *)
      if [[ $attempt -lt $max_retries ]]; then
        sleep "$retry_delay"
        ((attempt++))
        continue
      fi
      msg_error "GitHub API call failed (HTTP $http_code)."
      return 1
      ;;
    esac
    ((attempt++))
  done

  msg_error "GitHub API call failed after ${max_retries} attempts: ${url}"
  return 1
}

# ------------------------------------------------------------------------------
# Codeberg API call with retry logic
# Same shape as github_api_call but unauthenticated and without token prompts.
# NOTE(review): the for-loop variable `attempt` is not declared local here.
# ------------------------------------------------------------------------------
codeberg_api_call() {
  local url="$1"
  local output_file="${2:-/dev/stdout}"
  local max_retries=3
  local retry_delay=2

  for attempt in $(seq 1 $max_retries); do
    local http_code
    http_code=$(curl -sSL -w "%{http_code}" -o "$output_file" \
      -H "Accept: application/json" \
      "$url" 2>/dev/null) || true

    case "$http_code" in
    200)
      return 0
      ;;
    401)
      msg_error "Codeberg API authentication failed (HTTP 401)."
      return 1
      ;;
    403)
      # Rate limit - retry
      if [[ $attempt -lt $max_retries ]]; then
        msg_warn "Codeberg API rate limit, waiting ${retry_delay}s... (attempt $attempt/$max_retries)"
        sleep "$retry_delay"
        retry_delay=$((retry_delay * 2))
        continue
      fi
      msg_error "Codeberg API rate limit exceeded (HTTP 403)."
      return 1
      ;;
    404)
      msg_error "Codeberg repository or release not found (HTTP 404): $url"
      return 1
      ;;
    000 | "")
      if [[ $attempt -lt $max_retries ]]; then
        sleep "$retry_delay"
        continue
      fi
      msg_error "Codeberg API connection failed (no response)."
      msg_error "Check your network/DNS: curl -sSL https://codeberg.org"
      return 1
      ;;
    *)
      if [[ $attempt -lt $max_retries ]]; then
        sleep "$retry_delay"
        continue
      fi
      msg_error "Codeberg API call failed (HTTP $http_code)."
      return 1
      ;;
    esac
  done

  msg_error "Codeberg API call failed after ${max_retries} attempts: ${url}"
  return 1
}

# ------------------------------------------------------------------------------
# should_upgrade CURRENT TARGET
# Returns 0 when CURRENT is empty (nothing installed) or TARGET sorts newer
# than CURRENT (delegates to version_gt defined above).
# ------------------------------------------------------------------------------
should_upgrade() {
  local current="$1"
  local target="$2"

  [[ -z "$current" ]] && return 0
  version_gt "$target" "$current" && return 0
  return 1
}

# ------------------------------------------------------------------------------
# Get OS information (cached for performance)
# Reads /etc/os-release once and exports _OS_ID/_OS_CODENAME/_OS_VERSION(_FULL).
# $1 selects the field: id | codename | version | version_id | version_full | all
# ------------------------------------------------------------------------------
get_os_info() {
  local field="${1:-all}" # id, codename, version, version_id, all

  # Cache OS info to avoid repeated file reads
  if [[ -z "${_OS_ID:-}" ]]; then
    export _OS_ID=$(awk -F= '/^ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
    export _OS_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
    export _OS_VERSION=$(awk -F= '/^VERSION_ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
    export _OS_VERSION_FULL=$(awk -F= '/^VERSION=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
  fi

  case "$field" in
  id) echo "$_OS_ID" ;;
  codename) echo "$_OS_CODENAME" ;;
  version) echo "$_OS_VERSION" ;;
  version_id) echo "$_OS_VERSION" ;;
  version_full) echo "$_OS_VERSION_FULL" ;;
  all) echo "ID=$_OS_ID CODENAME=$_OS_CODENAME VERSION=$_OS_VERSION" ;;
  *) echo "$_OS_ID" ;;
  esac
}

# ------------------------------------------------------------------------------
# Check if running on specific OS
# ------------------------------------------------------------------------------
is_debian() {
  [[ "$(get_os_info id)" == "debian" ]]
}

is_ubuntu() {
  [[ "$(get_os_info id)" == "ubuntu" ]]
}

is_alpine() {
  [[ "$(get_os_info id)" == "alpine" ]]
}

# ------------------------------------------------------------------------------
# Get Debian/Ubuntu major version
# ------------------------------------------------------------------------------
get_os_version_major() {
  local version=$(get_os_info version)
  echo 
"${version%%.*}"
}

# ------------------------------------------------------------------------------
# Download file with retry logic and progress
# $1 = URL, $2 = output path, $3 = max retries (default 3),
# $4 = "true" to show a curl progress bar instead of silent mode.
# ------------------------------------------------------------------------------
download_file() {
  local url="$1"
  local output="$2"
  local max_retries="${3:-3}"
  local show_progress="${4:-false}"

  local curl_opts=(-fsSL)
  [[ "$show_progress" == "true" ]] && curl_opts=(-fL#)

  for attempt in $(seq 1 $max_retries); do
    if curl "${curl_opts[@]}" -o "$output" "$url"; then
      return 0
    fi

    if [[ $attempt -lt $max_retries ]]; then
      msg_warn "Download failed, retrying... (attempt $attempt/$max_retries)"
      sleep 2
    fi
  done

  msg_error "Failed to download: $url"
  return 1
}

# ------------------------------------------------------------------------------
# Get fallback suite for repository (comprehensive mapping)
# Echoes the current codename if the repo serves it; otherwise probes a
# per-distro fallback chain via verify_repo_available and echoes the first hit.
# Always succeeds - as a last resort the first fallback is returned unverified.
# ------------------------------------------------------------------------------
get_fallback_suite() {
  local distro_id="$1"
  local distro_codename="$2"
  local repo_base_url="$3"

  # Check if current codename works
  if verify_repo_available "$repo_base_url" "$distro_codename"; then
    echo "$distro_codename"
    return 0
  fi

  # Build fallback chain based on distro
  local fallback_chain=()
  case "$distro_id" in
  debian)
    case "$distro_codename" in
    trixie | forky | sid)
      fallback_chain=("bookworm" "bullseye")
      ;;
    bookworm)
      fallback_chain=("bookworm" "bullseye")
      ;;
    bullseye)
      fallback_chain=("bullseye" "buster")
      ;;
    *)
      fallback_chain=("bookworm" "bullseye")
      ;;
    esac
    ;;
  ubuntu)
    case "$distro_codename" in
    oracular | plucky)
      fallback_chain=("noble" "jammy" "focal")
      ;;
    noble)
      fallback_chain=("noble" "jammy")
      ;;
    mantic | lunar)
      fallback_chain=("jammy" "focal")
      ;;
    jammy)
      fallback_chain=("jammy" "focal")
      ;;
    focal)
      fallback_chain=("focal" "bionic")
      ;;
    *)
      fallback_chain=("jammy" "focal")
      ;;
    esac
    ;;
  *)
    # Non-Debian/Ubuntu: no chain to try, just echo the codename back
    echo "$distro_codename"
    return 0
    ;;
  esac

  # Try each fallback suite with actual HTTP check
  for suite in "${fallback_chain[@]}"; do
    if verify_repo_available "$repo_base_url" "$suite"; then
      debug_log "Fallback suite found: $suite for $distro_codename"
      echo "$suite"
      return 0
    fi
  done

  # Last resort: return first fallback without verification
  echo "${fallback_chain[0]:-$distro_codename}"
  return 0
}

# ------------------------------------------------------------------------------
# Verify package source and version
# Returns 0 when apt-cache policy output for $1 mentions $2.
# NOTE(review): a plain grep over policy output - the version string may match
# in an unrelated line; confirm this is precise enough for callers.
# ------------------------------------------------------------------------------
verify_package_source() {
  local package="$1"
  local expected_version="$2"

  if apt-cache policy "$package" 2>/dev/null | grep -q "$expected_version"; then
    return 0
  fi
  return 1
}

# ------------------------------------------------------------------------------
# Check if running on LTS version
# ------------------------------------------------------------------------------
is_lts_version() {
  local os_id=$(get_os_info id)
  local codename=$(get_os_info codename)

  if [[ "$os_id" == "ubuntu" ]]; then
    case "$codename" in
    focal | jammy | noble) return 0 ;; # 20.04, 22.04, 24.04
    *) return 1 ;;
    esac
  elif [[ "$os_id" == "debian" ]]; then
    # Debian releases are all "stable"
    case "$codename" in
    bullseye | bookworm | trixie) return 0 ;;
    *) return 1 ;;
    esac
  fi

  return 1
}

# ------------------------------------------------------------------------------
# Get optimal number of parallel jobs (cached)
# Features:
# - CPU count detection
# - Memory-based limiting (1.5GB per job for safety)
# - Current load awareness
# - Container/VM detection for conservative limits
# ------------------------------------------------------------------------------
get_parallel_jobs() {
  if [[ -z "${_PARALLEL_JOBS:-}" ]]; then
    local cpu_count
    cpu_count=$(nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo 2>/dev/null || 
echo 1)

    local mem_mb
    mem_mb=$(free -m 2>/dev/null | awk '/^Mem:/{print $2}' || echo 1024)

    # Assume 1.5GB per compilation job for safety margin
    local max_by_mem=$((mem_mb / 1536))
    ((max_by_mem < 1)) && max_by_mem=1

    # Check current system load - reduce jobs if already loaded
    local load_1m
    load_1m=$(awk '{print int($1)}' /proc/loadavg 2>/dev/null || echo 0)
    local available_cpus=$((cpu_count - load_1m))
    ((available_cpus < 1)) && available_cpus=1

    # Take minimum of: available CPUs, memory-limited, and total CPUs
    local max_jobs=$cpu_count
    ((max_by_mem < max_jobs)) && max_jobs=$max_by_mem
    ((available_cpus < max_jobs)) && max_jobs=$available_cpus

    # Container detection - be more conservative in containers
    if [[ -f /.dockerenv ]] || grep -q 'lxc\|docker\|container' /proc/1/cgroup 2>/dev/null; then
      # Reduce by 25% in containers to leave headroom
      max_jobs=$((max_jobs * 3 / 4))
      ((max_jobs < 1)) && max_jobs=1
    fi

    # Final bounds check
    ((max_jobs < 1)) && max_jobs=1
    ((max_jobs > cpu_count)) && max_jobs=$cpu_count

    export _PARALLEL_JOBS=$max_jobs
    debug_log "Parallel jobs: $_PARALLEL_JOBS (CPUs: $cpu_count, mem-limit: $max_by_mem, load: $load_1m)"
  fi
  echo "$_PARALLEL_JOBS"
}

# ------------------------------------------------------------------------------
# Get default PHP version for OS
# Updated for latest distro releases
# Maps the detected distro major version to the PHP series its archive ships.
# ------------------------------------------------------------------------------
get_default_php_version() {
  local os_id
  os_id=$(get_os_info id)
  local os_version
  os_version=$(get_os_version_major)

  case "$os_id" in
  debian)
    case "$os_version" in
    14) echo "8.4" ;; # Debian 14 (Forky) - future
    13) echo "8.3" ;; # Debian 13 (Trixie)
    12) echo "8.2" ;; # Debian 12 (Bookworm)
    11) echo "7.4" ;; # Debian 11 (Bullseye)
    *) echo "8.3" ;;  # Default to latest stable
    esac
    ;;
  ubuntu)
    case "$os_version" in
    26) echo "8.4" ;; # Ubuntu 26.04 - future
    24) echo "8.3" ;; # Ubuntu 24.04 LTS (Noble)
    22) echo "8.1" ;; # Ubuntu 22.04 LTS (Jammy)
    20) echo "7.4" ;; # Ubuntu 20.04 LTS (Focal)
    *) echo "8.3" ;;  # Default to latest stable
    esac
    ;;
  *)
    echo "8.3"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Get default Python version for OS
# Updated for latest distro releases
# Maps the detected distro major version to its stock python3 series.
# ------------------------------------------------------------------------------
get_default_python_version() {
  local os_id
  os_id=$(get_os_info id)
  local os_version
  os_version=$(get_os_version_major)

  case "$os_id" in
  debian)
    case "$os_version" in
    14) echo "3.13" ;; # Debian 14 (Forky) - future
    13) echo "3.12" ;; # Debian 13 (Trixie)
    12) echo "3.11" ;; # Debian 12 (Bookworm)
    11) echo "3.9" ;;  # Debian 11 (Bullseye)
    *) echo "3.12" ;;  # Default to latest stable
    esac
    ;;
  ubuntu)
    case "$os_version" in
    26) echo "3.13" ;; # Ubuntu 26.04 - future
    24) echo "3.12" ;; # Ubuntu 24.04 LTS
    22) echo "3.10" ;; # Ubuntu 22.04 LTS
    20) echo "3.8" ;;  # Ubuntu 20.04 LTS
    *) echo "3.12" ;;  # Default to latest stable
    esac
    ;;
  *)
    echo "3.12"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Get default Node.js LTS version
# ------------------------------------------------------------------------------
get_default_nodejs_version() {
  # Current LTS as of January 2026 (Node.js 24 LTS)
  echo "24"
}

# ------------------------------------------------------------------------------
# Check if package manager is locked
# Returns 0 when any of the three apt/dpkg lock files is held by a process.
# ------------------------------------------------------------------------------
is_apt_locked() {
  if fuser /var/lib/dpkg/lock-frontend &>/dev/null ||
    fuser /var/lib/apt/lists/lock &>/dev/null ||
    fuser /var/cache/apt/archives/lock &>/dev/null; then
    return 0
  fi
  return 1
}

# ------------------------------------------------------------------------------
# Wait for apt to be available
# 
------------------------------------------------------------------------------
# Polls is_apt_locked every 5s; $1 = max seconds to wait (default 300).
# Returns 1 on timeout.
wait_for_apt() {
  local max_wait="${1:-300}" # 5 minutes default
  local waited=0

  while is_apt_locked; do
    if [[ $waited -ge $max_wait ]]; then
      msg_error "Timeout waiting for apt to be available"
      return 1
    fi

    sleep 5
    waited=$((waited + 5))
  done

  return 0
}

# ------------------------------------------------------------------------------
# Cleanup old repository files (migration helper)
# Removes every .list/.sources file, trusted.gpg.d key and keyring whose name
# starts with "$1" - callers pass the repo/app prefix (e.g. "mariadb").
# ------------------------------------------------------------------------------
cleanup_old_repo_files() {
  local app="$1"

  # Remove old-style .list files (including backups)
  rm -f /etc/apt/sources.list.d/"${app}"*.list
  rm -f /etc/apt/sources.list.d/"${app}"*.list.save
  rm -f /etc/apt/sources.list.d/"${app}"*.list.distUpgrade
  rm -f /etc/apt/sources.list.d/"${app}"*.list.dpkg-*

  # Remove old GPG keys from trusted.gpg.d
  rm -f /etc/apt/trusted.gpg.d/"${app}"*.gpg

  # Remove keyrings from /etc/apt/keyrings
  rm -f /etc/apt/keyrings/"${app}"*.gpg

  # Remove ALL .sources files for this app (including the main one)
  # This ensures no orphaned .sources files reference deleted keyrings
  rm -f /etc/apt/sources.list.d/"${app}"*.sources
}

# ------------------------------------------------------------------------------
# Cleanup orphaned .sources files that reference missing keyrings
# This prevents APT signature verification errors
# Call this at the start of any setup function to ensure APT is in a clean state
# ------------------------------------------------------------------------------
cleanup_orphaned_sources() {
  local sources_dir="/etc/apt/sources.list.d"
  local keyrings_dir="/etc/apt/keyrings"

  [[ ! -d "$sources_dir" ]] && return 0

  while IFS= read -r -d '' sources_file; do
    local basename_file
    basename_file=$(basename "$sources_file")

    # NEVER remove debian.sources - this is the standard Debian repository
    if [[ "$basename_file" == "debian.sources" ]]; then
      continue
    fi

    # Extract Signed-By path from .sources file
    local keyring_path
    keyring_path=$(grep -E '^Signed-By:' "$sources_file" 2>/dev/null | awk '{print $2}' 2>/dev/null || true)

    # If keyring doesn't exist, remove the .sources file
    if [[ -n "$keyring_path" ]] && [[ ! -f "$keyring_path" ]]; then
      rm -f "$sources_file"
    fi
  done < <(find "$sources_dir" -name "*.sources" -print0 2>/dev/null)

  # Also check for broken symlinks in keyrings directory
  if [[ -d "$keyrings_dir" ]]; then
    find "$keyrings_dir" -type l ! -exec test -e {} \; -delete 2>/dev/null || true
  fi
}

# ------------------------------------------------------------------------------
# Ensure APT is in a working state before installing packages
# This should be called at the start of any setup function
# Features:
# - Fixes interrupted dpkg operations
# - Removes orphaned sources
# - Handles lock file contention
# - Progressive recovery with fallbacks
# ------------------------------------------------------------------------------
ensure_apt_working() {
  local max_wait=60 # Maximum seconds to wait for apt lock

  # Wait for any existing apt/dpkg processes to finish
  local waited=0
  while fuser /var/lib/dpkg/lock-frontend &>/dev/null ||
    fuser /var/lib/apt/lists/lock &>/dev/null ||
    fuser /var/cache/apt/archives/lock &>/dev/null; do
    if ((waited >= max_wait)); then
      msg_warn "APT lock held for ${max_wait}s, attempting to continue anyway"
      break
    fi
    debug_log "Waiting for APT lock (${waited}s)..."
    sleep 2
    ((waited += 2))
  done

  # Fix interrupted dpkg operations first
  # This can happen if a previous installation was interrupted (e.g., by script error)
  if dpkg --audit 2>&1 | grep -q .; then
    debug_log "Fixing interrupted dpkg operations"
    $STD dpkg --configure -a 2>/dev/null || true
  fi

  # Clean up orphaned sources first
  cleanup_orphaned_sources

  # Try to update package lists
  if ! $STD apt update 2>/dev/null; then
    debug_log "First apt update failed, trying recovery steps"

    # Step 1: Clear apt lists cache
    rm -rf /var/lib/apt/lists/* 2>/dev/null || true
    mkdir -p /var/lib/apt/lists/partial

    # Step 2: Clean up potentially broken sources
    cleanup_orphaned_sources

    # Step 3: Try again
    if ! $STD apt update 2>/dev/null; then
      # Step 4: More aggressive - remove all third-party sources
      msg_warn "APT update still failing, removing third-party sources"
      find /etc/apt/sources.list.d/ -type f \( -name "*.sources" -o -name "*.list" \) \
        ! -name "debian.sources" -delete 2>/dev/null || true

      # Final attempt
      if ! 
$STD apt update; then + msg_error "Cannot update package lists - APT is critically broken" + return 1 + fi + fi + fi + + return 0 +} + +# ------------------------------------------------------------------------------ +# Standardized deb822 repository setup (with optional Architectures) +# Always runs apt update after repo creation to ensure package availability +# ------------------------------------------------------------------------------ +setup_deb822_repo() { + local name="$1" + local gpg_url="$2" + local repo_url="$3" + local suite="$4" + local component="${5:-main}" + local architectures="${6-}" # optional + local enabled="${7-}" # optional: "true" or "false" + + # Validate required parameters + if [[ -z "$name" || -z "$gpg_url" || -z "$repo_url" || -z "$suite" ]]; then + msg_error "setup_deb822_repo: missing required parameters (name=$name repo=$repo_url suite=$suite)" + return 1 + fi + + # Cleanup + cleanup_old_repo_files "$name" + cleanup_orphaned_sources + + mkdir -p /etc/apt/keyrings || { + msg_error "Failed to create /etc/apt/keyrings" + return 1 + } + + # Import GPG key (auto-detect binary vs ASCII-armored format) + local tmp_gpg + tmp_gpg=$(mktemp) || return 1 + curl -fsSL "$gpg_url" -o "$tmp_gpg" || { + msg_error "Failed to download GPG key for ${name}" + rm -f "$tmp_gpg" + return 1 + } + + if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then + # ASCII-armored — dearmor to binary + gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" <"$tmp_gpg" || { + msg_error "Failed to install GPG key for ${name}" + rm -f "$tmp_gpg" + return 1 + } + else + # Already binary — copy directly + cp -f "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || { + msg_error "Failed to install GPG key for ${name}" + rm -f "$tmp_gpg" + return 1 + } + fi + rm -f "$tmp_gpg" + chmod 644 "/etc/apt/keyrings/${name}.gpg" + + # Write deb822 + { + echo "Types: deb" + echo "URIs: $repo_url" + echo "Suites: $suite" + # Flat repositories (suite="./" or absolute path) must not have Components 
+ if [[ "$suite" != "./" && -n "$component" ]]; then + echo "Components: $component" + fi + [[ -n "$architectures" ]] && echo "Architectures: $architectures" + echo "Signed-By: /etc/apt/keyrings/${name}.gpg" + [[ -n "$enabled" ]] && echo "Enabled: $enabled" + } >/etc/apt/sources.list.d/${name}.sources + + $STD apt update || { + msg_warn "apt update failed after adding repository: ${name}" + } +} + +# ------------------------------------------------------------------------------ +# Package version hold/unhold helpers +# ------------------------------------------------------------------------------ +hold_package_version() { + local package="$1" + $STD apt-mark hold "$package" || { + msg_warn "Failed to hold package version: ${package}" + } +} + +unhold_package_version() { + local package="$1" + $STD apt-mark unhold "$package" || { + msg_warn "Failed to unhold package version: ${package}" + } +} + +# ------------------------------------------------------------------------------ +# Safe service restart with verification +# ------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ +# Safe service restart with retry logic and wait-for-ready +# Usage: safe_service_restart "nginx" [timeout_seconds] +# ------------------------------------------------------------------------------ +safe_service_restart() { + local service="$1" + local timeout="${2:-30}" # Default 30 second timeout + local max_retries=2 + local retry=0 + + while [[ $retry -le $max_retries ]]; do + if systemctl is-active --quiet "$service"; then + $STD systemctl restart "$service" + else + $STD systemctl start "$service" + fi + + # Wait for service to become active with timeout + local waited=0 + while [[ $waited -lt $timeout ]]; do + if systemctl is-active --quiet "$service"; then + return 0 + fi + sleep 1 + ((waited++)) + done + + retry=$((retry + 1)) + if [[ $retry -le $max_retries ]]; then + debug_log 
"Service $service failed to start, retrying ($retry/$max_retries)..." + # Try to stop completely before retry + systemctl stop "$service" 2>/dev/null || true + sleep 2 + fi + done + + msg_error "Failed to start $service after $max_retries retries" + systemctl status "$service" --no-pager -l 2>/dev/null | head -20 || true + return 1 +} + +# ------------------------------------------------------------------------------ +# Enable and start service (with error handling) +# ------------------------------------------------------------------------------ +enable_and_start_service() { + local service="$1" + + if ! systemctl enable "$service" &>/dev/null; then + msg_error "Failed to enable service: $service" + return 1 + fi + + if ! systemctl start "$service" &>/dev/null; then + msg_error "Failed to start $service" + systemctl status "$service" --no-pager + return 1 + fi + + return 0 +} + +# ------------------------------------------------------------------------------ +# Check if service is enabled +# ------------------------------------------------------------------------------ +is_service_enabled() { + local service="$1" + systemctl is-enabled --quiet "$service" 2>/dev/null +} + +# ------------------------------------------------------------------------------ +# Check if service is running +# ------------------------------------------------------------------------------ +is_service_running() { + local service="$1" + systemctl is-active --quiet "$service" 2>/dev/null +} + +# ------------------------------------------------------------------------------ +# Extract version from JSON (GitHub releases) +# ------------------------------------------------------------------------------ +extract_version_from_json() { + local json="$1" + local field="${2:-tag_name}" + local strip_v="${3:-true}" + + ensure_dependencies jq + + local version + version=$(echo "$json" | jq -r ".${field} // empty") + + if [[ -z "$version" ]]; then + msg_warn "JSON field '${field}' is empty in API 
response" + return 1 + fi + + if [[ "$strip_v" == "true" ]]; then + echo "${version#v}" + else + echo "$version" + fi +} + +# ------------------------------------------------------------------------------ +# Get latest GitHub release version with fallback to tags +# Usage: get_latest_github_release "owner/repo" [strip_v] [include_prerelease] +# ------------------------------------------------------------------------------ +get_latest_github_release() { + local repo="$1" + local strip_v="${2:-true}" + local temp_file=$(mktemp) + + if ! github_api_call "https://api.github.com/repos/${repo}/releases/latest" "$temp_file"; then + msg_warn "GitHub API call failed for ${repo}" + rm -f "$temp_file" + return 1 + fi + + local version + version=$(extract_version_from_json "$(cat "$temp_file")" "tag_name" "$strip_v") + rm -f "$temp_file" + + if [[ -z "$version" ]]; then + msg_error "Could not determine latest version for ${repo}" + return 1 + fi + + echo "$version" +} + +# ------------------------------------------------------------------------------ +# Get latest Codeberg release version +# ------------------------------------------------------------------------------ +get_latest_codeberg_release() { + local repo="$1" + local strip_v="${2:-true}" + local temp_file=$(mktemp) + + # Codeberg API: get all releases and pick the first non-draft/non-prerelease + if ! 
codeberg_api_call "https://codeberg.org/api/v1/repos/${repo}/releases" "$temp_file"; then + msg_warn "Codeberg API call failed for ${repo}" + rm -f "$temp_file" + return 1 + fi + + local version + # Codeberg uses same JSON structure but releases endpoint returns array + version=$(jq -r '[.[] | select(.draft==false and .prerelease==false)][0].tag_name // empty' "$temp_file") + + if [[ "$strip_v" == "true" ]]; then + version="${version#v}" + fi + + rm -f "$temp_file" + + if [[ -z "$version" ]]; then + msg_error "Could not determine latest version for ${repo}" + return 1 + fi + + echo "$version" +} + +# ------------------------------------------------------------------------------ +# Debug logging - using main debug_log function (line 40) +# Supports both TOOLS_DEBUG and DEBUG environment variables +# ------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ +# Performance timing helper +# ------------------------------------------------------------------------------ +start_timer() { + echo $(date +%s) +} + +end_timer() { + local start_time="$1" + local label="${2:-Operation}" + local end_time=$(date +%s) + local duration=$((end_time - start_time)) +} + +# ------------------------------------------------------------------------------ +# GPG key fingerprint verification +# ------------------------------------------------------------------------------ +verify_gpg_fingerprint() { + local key_file="$1" + local expected_fingerprint="$2" + + local actual_fingerprint + actual_fingerprint=$(gpg --show-keys --with-fingerprint --with-colons "$key_file" 2>&1 | grep -m1 '^fpr:' | cut -d: -f10) + + if [[ "$actual_fingerprint" == "$expected_fingerprint" ]]; then + return 0 + fi + + msg_error "GPG fingerprint mismatch! 
Expected: $expected_fingerprint, Got: $actual_fingerprint" + return 1 +} + +# ------------------------------------------------------------------------------ +# Get latest GitHub tag for a repository. +# +# Description: +# - Queries the GitHub API for tags (not releases) +# - Useful for repos that only create tags, not full releases +# - Supports optional prefix filter and version-only extraction +# - Returns the latest tag name (printed to stdout) +# +# Usage: +# MONGO_VERSION=$(get_latest_gh_tag "mongodb/mongo-tools") +# LATEST=$(get_latest_gh_tag "owner/repo" "v") # only tags starting with "v" +# LATEST=$(get_latest_gh_tag "owner/repo" "" "true") # strip leading "v" +# +# Arguments: +# $1 - GitHub repo (owner/repo) +# $2 - Tag prefix filter (optional, e.g. "v" or "100.") +# $3 - Strip prefix from result (optional, "true" to strip $2 prefix) +# +# Returns: +# 0 on success (tag printed to stdout), 1 on failure +# +# Notes: +# - Skips tags containing "rc", "alpha", "beta", "dev", "test" +# - Sorts by version number (sort -V) to find the latest +# - Respects GITHUB_TOKEN for rate limiting +# ------------------------------------------------------------------------------ +get_latest_gh_tag() { + local repo="$1" + local prefix="${2:-}" + local strip_prefix="${3:-false}" + + local header_args=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN") + + local http_code="" + http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gh_tags.json \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "${header_args[@]}" \ + "https://api.github.com/repos/${repo}/tags?per_page=100" 2>/dev/null) || true + + if [[ "$http_code" == "401" ]]; then + msg_error "GitHub API authentication failed (HTTP 401)." + if [[ -n "${GITHUB_TOKEN:-}" ]]; then + msg_error "Your GITHUB_TOKEN appears to be invalid or expired." + else + msg_error "The repository may require authentication. 
Try: export GITHUB_TOKEN=\"ghp_your_token\"" + fi + rm -f /tmp/gh_tags.json + return 1 + fi + + if [[ "$http_code" == "403" ]]; then + msg_error "GitHub API rate limit exceeded (HTTP 403)." + msg_error "To increase the limit, export a GitHub token before running the script:" + msg_error " export GITHUB_TOKEN=\"ghp_your_token_here\"" + rm -f /tmp/gh_tags.json + return 1 + fi + + if [[ "$http_code" == "000" || -z "$http_code" ]]; then + msg_error "GitHub API connection failed (no response)." + msg_error "Check your network/DNS: curl -sSL https://api.github.com/rate_limit" + rm -f /tmp/gh_tags.json + return 1 + fi + + if [[ "$http_code" != "200" ]] || [[ ! -s /tmp/gh_tags.json ]]; then + msg_error "Unable to fetch tags for ${repo} (HTTP ${http_code})" + rm -f /tmp/gh_tags.json + return 1 + fi + + local tags_json + tags_json=$() +# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0 +# +# Usage: +# if check_for_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" [optional] "v1.1.1"; then +# # trigger update... +# fi +# exit 0 +# } (end of update_script not from the function) +# +# Notes: +# - Requires `jq` (auto-installed if missing) +# - Does not modify anything, only checks version state +# - Does not support pre-releases +# ------------------------------------------------------------------------------ +check_for_gh_release() { + local app="$1" + local source="$2" + local pinned_version_in="${3:-}" # optional + local app_lc="" + app_lc="$(echo "${app,,}" | tr -d ' ')" + local current_file="$HOME/.${app_lc}" + + msg_info "Checking for update: ${app}" + + # DNS check + if ! 
getent hosts api.github.com >/dev/null 2>&1; then + msg_error "Network error: cannot resolve api.github.com" + return 1 + fi + + ensure_dependencies jq + + # Build auth header if token is available + local header_args=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN") + + # Try /latest endpoint for non-pinned versions (most efficient) + local releases_json="" http_code="" + + # For pinned versions, query the specific release tag directly + if [[ -n "$pinned_version_in" ]]; then + http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gh_check.json \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "${header_args[@]}" \ + "https://api.github.com/repos/${source}/releases/tags/${pinned_version_in}" 2>/dev/null) || true + + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gh_check.json ]]; then + releases_json="[$(/dev/null) || true + + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gh_check.json ]]; then + releases_json="[$(/dev/null) || true + + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gh_check.json ]]; then + releases_json=$(/dev/null) + if ((${#legacy_files[@]} == 1)); then + current="$(<"${legacy_files[0]}")" + echo "${current#v}" >"$current_file" + rm -f "${legacy_files[0]}" + fi + fi + current="${current#v}" + + # Pinned version handling + if [[ -n "$pinned_version_in" ]]; then + local pin_clean="${pinned_version_in#v}" + local match_raw="" + for i in "${!clean_tags[@]}"; do + if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then + match_raw="${raw_tags[$i]}" + break + fi + done + + if [[ -z "$match_raw" ]]; then + msg_error "Pinned version ${pinned_version_in} not found upstream" + return 1 + fi + + if [[ "$current" != "$pin_clean" ]]; then + CHECK_UPDATE_RELEASE="$match_raw" + msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}" + return 0 + fi + + msg_ok "No update available: ${app} is already on pinned version (${current})" + return 1 + fi + + # No 
pinning → use latest
  if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
    CHECK_UPDATE_RELEASE="$latest_raw"
    msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
    return 0
  fi

  msg_ok "No update available: ${app} (${latest_clean})"
  return 1
}

# ------------------------------------------------------------------------------
# Checks for new Codeberg release (latest tag).
#
# Description:
#   - Queries the Codeberg API for the latest release tag
#   - Compares it to a local cached version (~/.<app>)
#   - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
#
# Usage:
#   if check_for_codeberg_release "autocaliweb" "gelbphoenix/autocaliweb" [optional] "v0.11.3"; then
#     # trigger update...
#   fi
#   exit 0
# } (end of update_script not from the function)
#
# Notes:
#   - Requires `jq` (auto-installed if missing)
#   - Does not modify anything, only checks version state
#   - Does not support pre-releases
# ------------------------------------------------------------------------------
check_for_codeberg_release() {
  local app="$1"
  local source="$2"
  local pinned_version_in="${3:-}" # optional
  local app_lc="${app,,}"
  # Cache file holding the currently-installed version (stored without "v")
  local current_file="$HOME/.${app_lc}"

  msg_info "Checking for update: ${app}"

  # DNS check
  if ! getent hosts codeberg.org >/dev/null 2>&1; then
    msg_error "Network error: cannot resolve codeberg.org"
    return 1
  fi

  ensure_dependencies jq

  # Fetch releases from Codeberg API
  local releases_json=""
  releases_json=$(curl -fsSL --max-time 20 \
    -H 'Accept: application/json' \
    "https://codeberg.org/api/v1/repos/${source}/releases" 2>/dev/null) || {
    msg_error "Unable to fetch releases for ${app} (codeberg.org/api/v1/repos/${source}/releases)"
    return 1
  }

  # Keep only stable releases; the API returns them newest-first, so
  # index 0 below is the latest stable release
  mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json")
  if ((${#raw_tags[@]} == 0)); then
    msg_error "No stable releases found for ${app}"
    return 1
  fi

  local clean_tags=()
  for t in "${raw_tags[@]}"; do
    # Only strip leading 'v' when followed by a digit (e.g. v1.2.3)
    if [[ "$t" =~ ^v[0-9] ]]; then
      clean_tags+=("${t:1}")
    else
      clean_tags+=("$t")
    fi
  done

  local latest_raw="${raw_tags[0]}"
  local latest_clean="${clean_tags[0]}"

  # current installed (stored without v)
  local current=""
  if [[ -f "$current_file" ]]; then
    current="$(<"$current_file")"
  else
    # Migration: adopt a legacy /opt/*_version.txt marker — but only when
    # exactly one exists, otherwise ownership would be ambiguous
    local legacy_files
    mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null)
    if ((${#legacy_files[@]} == 1)); then
      current="$(<"${legacy_files[0]}")"
      echo "${current#v}" >"$current_file"
      rm -f "${legacy_files[0]}"
    fi
  fi
  current="${current#v}"

  # Pinned version handling: the pin must exist upstream; update only when
  # the installed version differs from the pin
  if [[ -n "$pinned_version_in" ]]; then
    local pin_clean="${pinned_version_in#v}"
    local match_raw=""
    for i in "${!clean_tags[@]}"; do
      if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then
        match_raw="${raw_tags[$i]}"
        break
      fi
    done

    if [[ -z "$match_raw" ]]; then
      msg_error "Pinned version ${pinned_version_in} not found upstream"
      return 1
    fi

    if [[ "$current" != "$pin_clean" ]]; then
      CHECK_UPDATE_RELEASE="$match_raw"
      msg_ok "Update available: ${app}
${current:-not installed} → ${pin_clean}"
      return 0
    fi

    msg_ok "No update available: ${app} is already on pinned version (${current})"
    return 1
  fi

  # No pinning → use latest
  if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
    CHECK_UPDATE_RELEASE="$latest_raw"
    msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
    return 0
  fi

  msg_ok "No update available: ${app} (${latest_clean})"
  return 1
}

# ------------------------------------------------------------------------------
# Creates and installs self-signed certificates.
#
# Description:
#   - Create a self-signed certificate with option to override application name
#   - Idempotent: returns immediately when cert and key already exist
#
# Variables:
#   APP - Application name (default: $APPLICATION variable)
# ------------------------------------------------------------------------------
create_self_signed_cert() {
  local APP_NAME="${1:-${APPLICATION}}"
  # FIX: split declaration and assignment so a failed command substitution
  # is not masked by `local` always succeeding (SC2155)
  local APP_NAME_LC
  APP_NAME_LC=$(echo "${APP_NAME,,}" | tr -d ' ')
  local CERT_DIR="/etc/ssl/${APP_NAME_LC}"
  local CERT_KEY="${CERT_DIR}/${APP_NAME_LC}.key"
  local CERT_CRT="${CERT_DIR}/${APP_NAME_LC}.crt"

  # Idempotent: keep an existing cert/key pair untouched
  if [[ -f "$CERT_CRT" && -f "$CERT_KEY" ]]; then
    return 0
  fi

  # Use ensure_dependencies for cleaner handling
  ensure_dependencies openssl || {
    msg_error "Failed to install OpenSSL"
    return 1
  }

  mkdir -p "$CERT_DIR"
  # NOTE(review): the SAN is built from the raw app name; a name containing
  # spaces would yield an invalid DNS SAN — confirm intent upstream.
  $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \
    -subj "/CN=${APP_NAME}" \
    -addext "subjectAltName=DNS:${APP_NAME}" \
    -keyout "$CERT_KEY" \
    -out "$CERT_CRT" || {
    msg_error "Failed to create self-signed certificate"
    return 1
  }

  # Private key readable by root only; certificate world-readable
  chmod 600 "$CERT_KEY"
  chmod 644 "$CERT_CRT"
}

# ------------------------------------------------------------------------------
# Downloads file with optional progress indicator using pv.
#
# Arguments:
#   $1 - URL
#   $2 - Destination path
# ------------------------------------------------------------------------------

function download_with_progress() {
  local url="$1"
  local output="$2"
  # Stop the UI spinner so it does not fight with pv/curl progress output
  if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi

  ensure_dependencies pv

  # Determine the download size from the HTTP response headers.
  # FIX: match the header name case-insensitively (HTTP/2 sends lowercase
  # "content-length") and keep only the last value, since redirects produce
  # multiple header blocks in the -I output.
  local content_length
  content_length=$(curl -fsSLI "$url" | grep -i '^content-length:' | awk '{print $2}' | tr -d '\r' | tail -n1 || true)

  if [[ -z "$content_length" ]]; then
    # Unknown size: fall back to curl's own progress bar
    if ! curl -fL# -o "$output" "$url"; then
      msg_error "Download failed: $url"
      return 1
    fi
  else
    # FIX: scope pipefail to a subshell instead of enabling it globally —
    # the original `set -o pipefail` leaked into the calling script for the
    # rest of its lifetime.
    if ! (set -o pipefail; curl -fsSL "$url" | pv -s "$content_length" >"$output"); then
      msg_error "Download failed: $url"
      return 1
    fi
  fi
}

# ------------------------------------------------------------------------------
# Ensures /usr/local/bin is permanently in system PATH.
#
# Description:
#   - Adds to /etc/profile.d for login shells (SSH, noVNC)
#   - Adds to /root/.bashrc for non-login shells (pct enter)
# ------------------------------------------------------------------------------

function ensure_usr_local_bin_persist() {
  # Skip on Proxmox host — only containers need this adjustment
  command -v pveversion &>/dev/null && return

  # Login shells: /etc/profile.d/
  local PROFILE_FILE="/etc/profile.d/custom_path.sh"
  if [[ ! -f "$PROFILE_FILE" ]]; then
    echo 'export PATH="/usr/local/bin:$PATH"' >"$PROFILE_FILE"
    chmod +x "$PROFILE_FILE"
  fi

  # Non-login shells (pct enter): /root/.bashrc (append only once)
  local BASHRC="/root/.bashrc"
  if [[ -f "$BASHRC" ]] && ! grep -q '/usr/local/bin' "$BASHRC"; then
    echo 'export PATH="/usr/local/bin:$PATH"' >>"$BASHRC"
  fi
}

# ------------------------------------------------------------------------------
# curl_download - Downloads a file with automatic retry and exponential backoff.
#
# Usage: curl_download <output> <url>
#
# Retries up to 5 times with increasing --max-time (60/120/240/480/960s).
# Returns 0 on success, 1 if all attempts fail.
# ------------------------------------------------------------------------------
function curl_download() {
  local output="$1"
  local url="$2"
  # Per-attempt transfer timeouts; count of entries == number of attempts
  local timeouts=(60 120 240 480 960)

  for i in "${!timeouts[@]}"; do
    if curl --connect-timeout 15 --max-time "${timeouts[$i]}" -fsSL -o "$output" "$url"; then
      return 0
    fi
    # Warn on every attempt except the final one
    if ((i < ${#timeouts[@]} - 1)); then
      msg_warn "Download timed out after ${timeouts[$i]}s, retrying... (attempt $((i + 2))/${#timeouts[@]})"
    fi
  done
  return 1
}

# ------------------------------------------------------------------------------
# Downloads and deploys latest Codeberg release (source, binary, tarball, asset).
#
# Description:
#   - Fetches latest release metadata from Codeberg API
#   - Supports the following modes:
#       - tarball: Source code tarball (default if omitted)
#       - source: Alias for tarball (same behavior)
#       - binary: .deb package install (arch-dependent)
#       - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries)
#       - singlefile: Standalone binary (no archive, direct chmod +x install)
#       - tag: Direct tag download (bypasses Release API)
#   - Handles download, extraction/installation and version tracking in ~/.<app>
#
# Parameters:
#   $1 APP - Application name (used for install path and version file)
#   $2 REPO - Codeberg repository in form user/repo
#   $3 MODE - Release type:
#       tarball → source tarball (.tar.gz)
#       binary → .deb file (auto-arch matched)
#       prebuild → prebuilt archive (e.g. tar.gz)
#       singlefile→ standalone binary (chmod +x)
#       tag → direct tag (bypasses Release API)
#   $4 VERSION - Optional release tag (default: latest)
#   $5 TARGET_DIR - Optional install path (default: /opt/<app>)
#   $6 ASSET_FILENAME - Required for:
#       - prebuild → archive filename or pattern
#       - singlefile→ binary filename or pattern
#
# Examples:
# # 1.
Minimal: Fetch and deploy source tarball +# fetch_and_deploy_codeberg_release "autocaliweb" "gelbphoenix/autocaliweb" +# +# # 2. Binary install via .deb asset (architecture auto-detected) +# fetch_and_deploy_codeberg_release "myapp" "myuser/myapp" "binary" +# +# # 3. Prebuilt archive (.tar.gz) with asset filename match +# fetch_and_deploy_codeberg_release "myapp" "myuser/myapp" "prebuild" "latest" "/opt/myapp" "myapp_Linux_x86_64.tar.gz" +# +# # 4. Single binary (chmod +x) +# fetch_and_deploy_codeberg_release "myapp" "myuser/myapp" "singlefile" "v1.0.0" "/opt/myapp" "myapp-linux-amd64" +# +# # 5. Explicit tag version +# fetch_and_deploy_codeberg_release "autocaliweb" "gelbphoenix/autocaliweb" "tag" "v0.11.3" "/opt/autocaliweb" +# ------------------------------------------------------------------------------ + +function fetch_and_deploy_codeberg_release() { + local app="$1" + local repo="$2" + local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag + local version="${var_appversion:-${4:-latest}}" + local target="${5:-/opt/$app}" + local asset_pattern="${6:-}" + + local app_lc=$(echo "${app,,}" | tr -d ' ') + local version_file="$HOME/.${app_lc}" + + local api_timeouts=(60 120 240) + + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + ensure_dependencies jq + + ### Tag Mode (bypass Release API) ### + if [[ "$mode" == "tag" ]]; then + if [[ "$version" == "latest" ]]; then + msg_error "Mode 'tag' requires explicit version (not 'latest')" + return 1 + fi + + local tag_name="$version" + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + + if [[ "$current_version" == "$version" ]]; then + $STD msg_ok "$app is already up-to-date (v$version)" + return 0 + fi + + # DNS check + if ! 
getent hosts "codeberg.org" &>/dev/null; then + msg_error "DNS resolution failed for codeberg.org – check /etc/resolv.conf or networking" + return 1 + fi + + local tmpdir + tmpdir=$(mktemp -d) || return 1 + + msg_info "Fetching Codeberg tag: $app ($tag_name)" + + local safe_version="${version//@/_}" + safe_version="${safe_version//\//_}" + local filename="${app_lc}-${safe_version}.tar.gz" + local download_success=false + + # Codeberg archive URL format: https://codeberg.org/{owner}/{repo}/archive/{tag}.tar.gz + local archive_url="https://codeberg.org/$repo/archive/${tag_name}.tar.gz" + if curl_download "$tmpdir/$filename" "$archive_url"; then + download_success=true + fi + + if [[ "$download_success" != "true" ]]; then + msg_error "Download failed for $app ($tag_name)" + rm -rf "$tmpdir" + return 1 + fi + + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { + msg_error "Failed to extract tarball" + rm -rf "$tmpdir" + return 1 + } + + local unpack_dir + unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1) + + shopt -s dotglob nullglob + cp -r "$unpack_dir"/* "$target/" + shopt -u dotglob nullglob + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" + return 0 + fi + + # Codeberg API: https://codeberg.org/api/v1/repos/{owner}/{repo}/releases + local api_url="https://codeberg.org/api/v1/repos/$repo/releases" + if [[ "$version" != "latest" ]]; then + # Get release by tag: /repos/{owner}/{repo}/releases/tags/{tag} + api_url="https://codeberg.org/api/v1/repos/$repo/releases/tags/$version" + fi + + # dns pre check + if ! 
getent hosts "codeberg.org" &>/dev/null; then + msg_error "DNS resolution failed for codeberg.org – check /etc/resolv.conf or networking" + return 1 + fi + + local attempt=0 success=false resp http_code + + while ((attempt < ${#api_timeouts[@]})); do + resp=$(curl --connect-timeout 10 --max-time "${api_timeouts[$attempt]}" -fsSL -w "%{http_code}" -o /tmp/codeberg_rel.json "$api_url") && success=true && break + ((attempt++)) + if ((attempt < ${#api_timeouts[@]})); then + msg_warn "API request timed out after ${api_timeouts[$((attempt - 1))]}s, retrying... (attempt $((attempt + 1))/${#api_timeouts[@]})" + fi + done + + if ! $success; then + msg_error "Failed to fetch release metadata from $api_url after ${#api_timeouts[@]} attempts" + return 1 + fi + + http_code="${resp:(-3)}" + [[ "$http_code" != "200" ]] && { + msg_error "Codeberg API returned HTTP $http_code" + return 1 + } + + local json tag_name + json=$(/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + local assets url_match="" + # Codeberg assets are in .assets[].browser_download_url + assets=$(echo "$json" | jq -r '.assets[].browser_download_url') + + # If explicit filename pattern is provided, match that first + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in + $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + + # Fall back to architecture heuristic + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + + # Fallback: any .deb file + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + + if [[ -z "$url_match" ]]; then + msg_error "No suitable .deb asset found for $app" + rm -rf "$tmpdir" + return 1 + fi + + filename="${url_match##*/}" + curl_download "$tmpdir/$filename" "$url_match" || { + msg_error "Download failed: 
$url_match" + rm -rf "$tmpdir" + return 1 + } + + chmod 644 "$tmpdir/$filename" + $STD apt install -y "$tmpdir/$filename" || { + $STD dpkg -i "$tmpdir/$filename" || { + msg_error "Both apt and dpkg installation failed" + rm -rf "$tmpdir" + return 1 + } + } + + ### Prebuild Mode ### + elif [[ "$mode" == "prebuild" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + curl_download "$tmpdir/$filename" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + local unpack_tmp + unpack_tmp=$(mktemp -d) + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + if [[ "$filename" == *.zip ]]; then + ensure_dependencies unzip + unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { + msg_error "Failed to extract ZIP archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then + tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { + msg_error "Failed to extract TAR archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unsupported archive format: $filename" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + + local top_dirs + top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l) + local top_entries inner_dir + top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) + if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then + inner_dir="$top_entries" + shopt -s 
dotglob nullglob + if compgen -G "$inner_dir/*" >/dev/null; then + cp -r "$inner_dir"/* "$target/" || { + msg_error "Failed to copy contents from $inner_dir to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Inner directory is empty: $inner_dir" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + else + shopt -s dotglob nullglob + if compgen -G "$unpack_tmp/*" >/dev/null; then + cp -r "$unpack_tmp"/* "$target/" || { + msg_error "Failed to copy contents to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unpacked archive is empty" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + fi + + ### Singlefile Mode ### + elif [[ "$mode" == "singlefile" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + mkdir -p "$target" + + local use_filename="${USE_ORIGINAL_FILENAME:-false}" + local target_file="$app" + [[ "$use_filename" == "true" ]] && target_file="$filename" + + curl_download "$target/$target_file" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then + chmod +x "$target/$target_file" + fi + + else + msg_error "Unknown mode: $mode" + rm -rf "$tmpdir" + return 1 + fi + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" +} + +# ------------------------------------------------------------------------------ +# 
Downloads and deploys latest GitHub release (source, binary, tarball, asset). +# +# Description: +# - Fetches latest release metadata from GitHub API +# - Supports the following modes: +# - tarball: Source code tarball (default if omitted) +# - source: Alias for tarball (same behavior) +# - binary: .deb package install (arch-dependent) +# - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries) +# - singlefile: Standalone binary (no archive, direct chmod +x install) +# - Handles download, extraction/installation and version tracking in ~/. +# +# Parameters: +# $1 APP - Application name (used for install path and version file) +# $2 REPO - GitHub repository in form user/repo +# $3 MODE - Release type: +# tarball → source tarball (.tar.gz) +# binary → .deb file (auto-arch matched) +# prebuild → prebuilt archive (e.g. tar.gz) +# singlefile→ standalone binary (chmod +x) +# $4 VERSION - Optional release tag (default: latest) +# $5 TARGET_DIR - Optional install path (default: /opt/) +# $6 ASSET_FILENAME - Required for: +# - prebuild → archive filename or pattern +# - singlefile→ binary filename or pattern +# +# Optional: +# - Set GITHUB_TOKEN env var to increase API rate limit (recommended for CI/CD). +# +# Examples: +# # 1. Minimal: Fetch and deploy source tarball +# fetch_and_deploy_gh_release "myapp" "myuser/myapp" +# +# # 2. Binary install via .deb asset (architecture auto-detected) +# fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary" +# +# # 3. Prebuilt archive (.tar.gz) with asset filename match +# fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz" +# +# # 4. Single binary (chmod +x) like Argus, Promtail etc. 
+# fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64" +# +# Notes: +# - For binary/prebuild/singlefile modes: if the target release has no +# matching asset, the function scans older releases and prompts the user +# (60s timeout, default yes) to use a previous version that has the asset. +# ------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ +# Scans older GitHub releases for a matching asset when the latest release +# is missing the expected file. Used internally by fetch_and_deploy_gh_release. +# +# Arguments: +# $1 - GitHub repo (owner/repo) +# $2 - mode (binary|prebuild|singlefile) +# $3 - asset_pattern (glob pattern for asset filename) +# $4 - tag to skip (the already-checked release) +# +# Output: +# Prints the release JSON of the first older release that has a matching asset. +# Returns 0 on success, 1 if no matching release found or user declined. 
+# ------------------------------------------------------------------------------ +_gh_scan_older_releases() { + local repo="$1" + local mode="$2" + local asset_pattern="$3" + local skip_tag="$4" + + local header=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN") + + local releases_list + releases_list=$(curl --connect-timeout 10 --max-time 30 -fsSL \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "${header[@]}" \ + "https://api.github.com/repos/${repo}/releases?per_page=15" 2>/dev/null) || { + msg_warn "Failed to fetch older releases for ${repo}" + return 1 + } + + local count + count=$(echo "$releases_list" | jq 'length') + + for ((i = 0; i < count; i++)); do + local rel_tag rel_draft rel_prerelease + rel_tag=$(echo "$releases_list" | jq -r ".[$i].tag_name") + rel_draft=$(echo "$releases_list" | jq -r ".[$i].draft") + rel_prerelease=$(echo "$releases_list" | jq -r ".[$i].prerelease") + + # Skip drafts, prereleases, and the tag we already checked + [[ "$rel_draft" == "true" || "$rel_prerelease" == "true" ]] && continue + [[ "$rel_tag" == "$skip_tag" ]] && continue + + local has_match=false + + if [[ "$mode" == "binary" ]]; then + local arch + arch=$(dpkg --print-architecture 2>/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + # Check with explicit pattern first, then arch heuristic, then any .deb + if [[ -n "$asset_pattern" ]]; then + has_match=$(echo "$releases_list" | jq -r --arg pat "$asset_pattern" ".[$i].assets[].name" | while read -r name; do + case "$name" in $asset_pattern) + echo true + break + ;; + esac + done) + fi + if [[ "$has_match" != "true" ]]; then + has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].browser_download_url" | grep -qE "($arch|amd64|x86_64|aarch64|arm64).*\.deb$" && echo true) + fi + if [[ "$has_match" != "true" ]]; then + has_match=$(echo "$releases_list" | jq -r 
".[$i].assets[].browser_download_url" | grep -qE '\.deb$' && echo true) + fi + + elif [[ "$mode" == "prebuild" || "$mode" == "singlefile" ]]; then + has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].name" | while read -r name; do + case "$name" in $asset_pattern) + echo true + break + ;; + esac + done) + fi + + if [[ "$has_match" == "true" ]]; then + local rel_version="$rel_tag" + [[ "$rel_tag" =~ ^v ]] && rel_version="${rel_tag:1}" + + local use_fallback="y" + if [[ -t 0 ]]; then + msg_warn "Release ${skip_tag} has no matching asset. Previous release ${rel_tag} has a compatible asset." + read -rp "Use version ${rel_tag} instead? [Y/n] (auto-yes in 60s): " -t 60 use_fallback || use_fallback="y" + use_fallback="${use_fallback:-y}" + fi + + if [[ "${use_fallback,,}" == "y" || "${use_fallback,,}" == "yes" ]]; then + echo "$releases_list" | jq ".[$i]" + return 0 + else + return 1 + fi + fi + done + + return 1 +} + +function fetch_and_deploy_gh_release() { + local app="$1" + local repo="$2" + local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile + local version="${var_appversion:-${4:-latest}}" + local target="${5:-/opt/$app}" + local asset_pattern="${6:-}" + + # Validate app name to prevent /root/. 
directory issues + if [[ -z "$app" ]]; then + # Derive app name from repo if not provided + app="${repo##*/}" + if [[ -z "$app" ]]; then + msg_error "fetch_and_deploy_gh_release requires app name or valid repo" + return 1 + fi + fi + + local app_lc=$(echo "${app,,}" | tr -d ' ') + local version_file="$HOME/.${app_lc}" + + local api_timeouts=(60 120 240) + + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + ensure_dependencies jq + + local api_url="https://api.github.com/repos/$repo/releases" + [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest" + local header=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN") + + # dns pre check + local gh_host + gh_host=$(awk -F/ '{print $3}' <<<"$api_url") + if ! getent hosts "$gh_host" &>/dev/null; then + msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking" + return 1 + fi + + local max_retries=${#api_timeouts[@]} retry_delay=2 attempt=1 success=false http_code + + while ((attempt <= max_retries)); do + http_code=$(curl --connect-timeout 10 --max-time "${api_timeouts[$((attempt - 1))]:-240}" -sSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true + if [[ "$http_code" == "200" ]]; then + success=true + break + elif [[ "$http_code" == "401" ]]; then + msg_error "GitHub API authentication failed (HTTP 401)." + if [[ -n "${GITHUB_TOKEN:-}" ]]; then + msg_error "Your GITHUB_TOKEN appears to be invalid or expired." + else + msg_error "The repository may require authentication." + fi + if prompt_for_github_token; then + header=(-H "Authorization: token $GITHUB_TOKEN") + continue + fi + break + elif [[ "$http_code" == "403" ]]; then + if ((attempt < max_retries)); then + msg_warn "GitHub API rate limit hit, retrying in ${retry_delay}s... 
(attempt $attempt/$max_retries)" + sleep "$retry_delay" + retry_delay=$((retry_delay * 2)) + else + msg_error "GitHub API rate limit exceeded (HTTP 403)." + if prompt_for_github_token; then + header=(-H "Authorization: token $GITHUB_TOKEN") + retry_delay=2 + attempt=0 + fi + fi + else + sleep "$retry_delay" + fi + ((attempt++)) + done + + if ! $success; then + if [[ "$http_code" == "000" || -z "$http_code" ]]; then + msg_error "GitHub API connection failed (no response)." + msg_error "Check your network/DNS: curl -sSL https://api.github.com/rate_limit" + elif [[ "$http_code" != "401" ]]; then + msg_error "Failed to fetch release metadata (HTTP $http_code)" + fi + return 1 + fi + + local json tag_name + json=$(/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + local assets url_match="" + assets=$(echo "$json" | jq -r '.assets[].browser_download_url') + + # If explicit filename pattern is provided (param $6), match that first + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in + $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + + # If no match via explicit pattern, fall back to architecture heuristic + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + + # Fallback: any .deb file + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + + # Fallback: scan older releases for a matching .deb asset + if [[ -z "$url_match" ]]; then + local fallback_json + if fallback_json=$(_gh_scan_older_releases "$repo" "binary" "$asset_pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty') + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitHub release: $app ($version)" + assets=$(echo "$json" 
| jq -r '.assets[].browser_download_url') + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + fi + fi + + if [[ -z "$url_match" ]]; then + msg_error "No suitable .deb asset found for $app" + rm -rf "$tmpdir" + return 1 + fi + + filename="${url_match##*/}" + curl_download "$tmpdir/$filename" "$url_match" || { + msg_error "Download failed: $url_match" + rm -rf "$tmpdir" + return 1 + } + + chmod 644 "$tmpdir/$filename" + # SYSTEMD_OFFLINE=1 prevents systemd-tmpfiles failures in unprivileged LXC (Debian 13+/systemd 257+) + # Support DPKG_CONFOLD/DPKG_CONFNEW env vars for config file handling during .deb upgrades + local dpkg_opts="" + [[ "${DPKG_FORCE_CONFOLD:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confold" + [[ "${DPKG_FORCE_CONFNEW:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confnew" + DEBIAN_FRONTEND=noninteractive SYSTEMD_OFFLINE=1 $STD apt install -y $dpkg_opts "$tmpdir/$filename" || { + SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || { + msg_error "Both apt and dpkg installation failed" + rm -rf "$tmpdir" + return 1 + } + } + + ### Prebuild Mode ### + elif [[ "$mode" == "prebuild" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + # Fallback: scan older releases for a matching asset + if [[ -z 
"$asset_url" ]]; then + local fallback_json + if fallback_json=$(_gh_scan_older_releases "$repo" "prebuild" "$pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty') + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitHub release: $app ($version)" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in $pattern) + asset_url="$u" + break + ;; + esac + done + fi + fi + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + curl_download "$tmpdir/$filename" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + local unpack_tmp + unpack_tmp=$(mktemp -d) + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + if [[ "$filename" == *.zip ]]; then + ensure_dependencies unzip + unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { + msg_error "Failed to extract ZIP archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then + tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { + msg_error "Failed to extract TAR archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unsupported archive format: $filename" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + + local top_dirs + top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l) + local top_entries inner_dir + top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) + if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then + # Strip leading folder + inner_dir="$top_entries" + shopt -s dotglob nullglob + if compgen -G "$inner_dir/*" >/dev/null; then + cp -r "$inner_dir"/* "$target/" || { + msg_error "Failed to 
copy contents from $inner_dir to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Inner directory is empty: $inner_dir" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + else + # Copy all contents + shopt -s dotglob nullglob + if compgen -G "$unpack_tmp/*" >/dev/null; then + cp -r "$unpack_tmp"/* "$target/" || { + msg_error "Failed to copy contents to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unpacked archive is empty" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + fi + + ### Singlefile Mode ### + elif [[ "$mode" == "singlefile" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + # Fallback: scan older releases for a matching asset + if [[ -z "$asset_url" ]]; then + local fallback_json + if fallback_json=$(_gh_scan_older_releases "$repo" "singlefile" "$pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty') + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitHub release: $app ($version)" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in $pattern) + asset_url="$u" + break + ;; + esac + done + fi + fi + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + mkdir -p "$target" + + local use_filename="${USE_ORIGINAL_FILENAME:-false}" + local target_file="$app" + [[ "$use_filename" == "true" ]] && 
target_file="$filename" + + curl_download "$target/$target_file" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then + chmod +x "$target/$target_file" + fi + + else + msg_error "Unknown mode: $mode" + rm -rf "$tmpdir" + return 1 + fi + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" +} + +# ------------------------------------------------------------------------------ +# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download). +# +# Description: +# - Adds Adminer to Apache or web root +# - Supports Alpine and Debian-based systems +# ------------------------------------------------------------------------------ + +function setup_adminer() { + if grep -qi alpine /etc/os-release; then + msg_info "Setup Adminer (Alpine)" + mkdir -p /var/www/localhost/htdocs/adminer + if ! curl_with_retry "https://github.com/vrana/adminer/releases/latest/download/adminer.php" "/var/www/localhost/htdocs/adminer/index.php"; then + msg_error "Failed to download Adminer" + return 1 + fi + cache_installed_version "adminer" "latest-alpine" + msg_ok "Setup Adminer (Alpine)" + else + msg_info "Setup Adminer (Debian/Ubuntu)" + ensure_dependencies adminer + $STD a2enconf adminer || { + msg_error "Failed to enable Adminer Apache config" + return 1 + } + $STD systemctl reload apache2 || { + msg_error "Failed to reload Apache" + return 1 + } + local VERSION + VERSION=$(dpkg -s adminer 2>/dev/null | grep '^Version:' | awk '{print $2}' 2>/dev/null || echo 'unknown') + cache_installed_version "adminer" "${VERSION:-unknown}" + msg_ok "Setup Adminer (Debian/Ubuntu)" + fi +} + +# ------------------------------------------------------------------------------ +# Installs or updates Composer globally (robust, idempotent). +# +# - Installs to /usr/local/bin/composer +# - Removes old binaries/symlinks in /usr/bin, /bin, /root/.composer, etc. 
# - Ensures /usr/local/bin is in PATH (permanent)
# - Auto-updates to latest version
# ------------------------------------------------------------------------------

function setup_composer() {
  local COMPOSER_BIN="/usr/local/bin/composer"
  # Composer refuses to run as root without this.
  export COMPOSER_ALLOW_SUPERUSER=1

  # Get currently installed version (empty if composer is absent/broken)
  local INSTALLED_VERSION=""
  if [[ -x "$COMPOSER_BIN" ]]; then
    INSTALLED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
  fi

  # Scenario 1: Already installed - just self-update
  if [[ -n "$INSTALLED_VERSION" ]]; then
    msg_info "Update Composer $INSTALLED_VERSION"
    $STD "$COMPOSER_BIN" self-update --no-interaction || {
      msg_warn "Composer self-update failed, continuing with current version"
    }
    local UPDATED_VERSION
    UPDATED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
    cache_installed_version "composer" "$UPDATED_VERSION"
    msg_ok "Update Composer $UPDATED_VERSION"
    return 0
  fi

  # Scenario 2: Fresh install
  msg_info "Setup Composer"

  # Remove stale copies from other locations so $COMPOSER_BIN wins in PATH.
  local old
  for old in /usr/bin/composer /bin/composer /root/.composer/vendor/bin/composer; do
    [[ -e "$old" && "$old" != "$COMPOSER_BIN" ]] && rm -f "$old"
  done

  ensure_usr_local_bin_persist
  export PATH="/usr/local/bin:$PATH"

  if ! curl_with_retry "https://getcomposer.org/installer" "/tmp/composer-setup.php"; then
    msg_error "Failed to download Composer installer"
    return 1
  fi

  $STD php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer || {
    msg_error "Failed to install Composer"
    rm -f /tmp/composer-setup.php
    return 1
  }
  rm -f /tmp/composer-setup.php

  if [[ ! -x "$COMPOSER_BIN" ]]; then
    msg_error "Composer installation failed"
    return 1
  fi

  chmod +x "$COMPOSER_BIN"
  # Installer may ship a slightly stale phar; bring it to latest (best-effort).
  $STD "$COMPOSER_BIN" self-update --no-interaction || {
    msg_warn "Composer self-update failed after fresh install"
  }

  local FINAL_VERSION
  FINAL_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
  cache_installed_version "composer" "$FINAL_VERSION"
  msg_ok "Setup Composer"
}

# ------------------------------------------------------------------------------
# Installs FFmpeg from source or prebuilt binary (Debian/Ubuntu only).
#
# Description:
# - Downloads and builds FFmpeg from GitHub (https://github.com/FFmpeg/FFmpeg)
# - Supports specific version override via FFMPEG_VERSION (e.g. n7.1.1)
# - Supports build profile via FFMPEG_TYPE:
#     - minimal : x264, vpx, mp3 only
#     - medium  : adds subtitles, fonts, opus, vorbis
#     - full    : adds dav1d, svt-av1, zlib, numa
#     - binary  : downloads static build (johnvansickle.com)
# - Defaults to latest stable version and full feature set
#
# Notes:
# - Requires: curl, jq, build-essential, and matching codec libraries
# - Result is installed to /usr/local/bin/ffmpeg
# ------------------------------------------------------------------------------

function setup_ffmpeg() {
  # Split declaration/assignment so a mktemp failure is not masked (SC2155).
  local TMP_DIR
  TMP_DIR=$(mktemp -d) || {
    msg_error "Failed to create temporary directory"
    return 1
  }
  local GITHUB_REPO="FFmpeg/FFmpeg"
  local VERSION="${FFMPEG_VERSION:-latest}"
  local TYPE="${FFMPEG_TYPE:-full}"
  local BIN_PATH="/usr/local/bin/ffmpeg"

  # Get currently installed version (empty if ffmpeg is absent)
  local INSTALLED_VERSION=""
  if command -v ffmpeg &>/dev/null; then
    INSTALLED_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}')
  fi

  msg_info "Setup FFmpeg ${VERSION} ($TYPE)"

  # Binary fallback mode: static build, no compilation.
  if [[ "$TYPE" == "binary" ]]; then
    if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then
      msg_error "Failed to download FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    fi
    tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || {
      msg_error "Failed to extract FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    }
    # Take the first extracted directory; guard against an empty match.
    local EXTRACTED_DIR
    EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*" | head -n1)
    if [[ -z "$EXTRACTED_DIR" || ! -f "$EXTRACTED_DIR/ffmpeg" ]]; then
      msg_error "Failed to extract FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    fi
    cp "$EXTRACTED_DIR/ffmpeg" "$BIN_PATH"
    cp "$EXTRACTED_DIR/ffprobe" /usr/local/bin/ffprobe
    chmod +x "$BIN_PATH" /usr/local/bin/ffprobe
    local FINAL_VERSION
    FINAL_VERSION=$($BIN_PATH -version 2>/dev/null | head -n1 | awk '{print $3}')
    rm -rf "$TMP_DIR"
    cache_installed_version "ffmpeg" "$FINAL_VERSION"
    ensure_usr_local_bin_persist
    [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION"
    return 0
  fi

  ensure_dependencies jq

  # Auto-detect latest stable version if none specified
  if [[ "$VERSION" == "latest" || -z "$VERSION" ]]; then
    local ffmpeg_tags
    ffmpeg_tags=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/tags" 2>/dev/null || echo "")

    if [[ -z "$ffmpeg_tags" ]]; then
      msg_warn "Could not fetch FFmpeg versions from GitHub, trying binary fallback"
      VERSION="" # Will trigger binary fallback below
    else
      # Stable tags look like nX.Y.Z; pick the highest by version sort.
      VERSION=$(echo "$ffmpeg_tags" | jq -r '.[].name' 2>/dev/null |
        grep -E '^n[0-9]+\.[0-9]+\.[0-9]+$' |
        sort -V | tail -n1 || echo "")
    fi
  fi

  if [[ -z "$VERSION" ]]; then
    msg_info "Could not determine FFmpeg source version, using pre-built binary"
  fi

  # Dependency selection per build profile
  local DEPS=(build-essential yasm nasm pkg-config)
  case "$TYPE" in
  minimal)
    DEPS+=(libx264-dev libvpx-dev libmp3lame-dev)
    ;;
  medium)
    DEPS+=(libx264-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev)
    ;;
  full)
    DEPS+=(
      libx264-dev libx265-dev libvpx-dev libmp3lame-dev
      libfreetype6-dev libass-dev libopus-dev libvorbis-dev
      libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev
      libva-dev libdrm-dev
    )
    ;;
  *)
    msg_error "Invalid FFMPEG_TYPE: $TYPE"
    rm -rf "$TMP_DIR"
    return 1
    ;;
  esac

  ensure_dependencies "${DEPS[@]}"

  # Try to download source if VERSION is set
  if [[ -n "$VERSION" ]]; then
    if ! CURL_TIMEOUT=300 curl_with_retry "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz" "$TMP_DIR/ffmpeg.tar.gz"; then
      msg_warn "Failed to download FFmpeg source ${VERSION}, falling back to pre-built binary"
      VERSION=""
    fi
  fi

  # If no source download (either VERSION empty or download failed), use binary
  if [[ -z "$VERSION" ]]; then
    msg_info "Setup FFmpeg from pre-built binary"
    if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then
      msg_error "Failed to download FFmpeg pre-built binary"
      rm -rf "$TMP_DIR"
      return 1
    fi

    tar -xJf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || {
      msg_error "Failed to extract FFmpeg binary archive"
      rm -rf "$TMP_DIR"
      return 1
    }

    if ! cp "$TMP_DIR/ffmpeg-"*/ffmpeg /usr/local/bin/ffmpeg 2>/dev/null; then
      msg_error "Failed to install FFmpeg binary"
      rm -rf "$TMP_DIR"
      return 1
    fi

    cache_installed_version "ffmpeg" "static"
    rm -rf "$TMP_DIR"
    msg_ok "Setup FFmpeg from pre-built binary"
    return 0
  fi

  tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR" || {
    msg_error "Failed to extract FFmpeg source"
    rm -rf "$TMP_DIR"
    return 1
  }

  local args=(
    --enable-gpl
    --enable-shared
    --enable-nonfree
    --disable-static
    --enable-libx264
    --enable-libvpx
    --enable-libmp3lame
  )

  if [[ "$TYPE" != "minimal" ]]; then
    args+=(--enable-libfreetype --enable-libass --enable-libopus --enable-libvorbis)
  fi

  if [[ "$TYPE" == "full" ]]; then
    args+=(--enable-libx265 --enable-libdav1d --enable-zlib)
    args+=(--enable-vaapi --enable-libdrm)
  fi

  # Build in a subshell: the original cd'd into $TMP_DIR and later removed it,
  # leaving the caller stranded in a deleted working directory.
  (
    cd "$TMP_DIR/FFmpeg-"* || {
      msg_error "Source extraction failed"
      exit 1
    }
    $STD ./configure "${args[@]}" || {
      msg_error "FFmpeg configure failed"
      exit 1
    }
    $STD make -j"$(nproc)" || {
      msg_error "FFmpeg compilation failed"
      exit 1
    }
    $STD make install || {
      msg_error "FFmpeg installation failed"
      exit 1
    }
  ) || {
    rm -rf "$TMP_DIR"
    return 1
  }

  # Register the freshly installed shared libraries with the dynamic linker.
  echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf
  $STD ldconfig

  ldconfig -p 2>/dev/null | grep libavdevice >/dev/null || {
    msg_error "libavdevice not registered with dynamic linker"
    rm -rf "$TMP_DIR"
    return 1
  }

  if ! command -v ffmpeg &>/dev/null; then
    msg_error "FFmpeg installation failed"
    rm -rf "$TMP_DIR"
    return 1
  fi

  local FINAL_VERSION
  FINAL_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}')
  rm -rf "$TMP_DIR"
  cache_installed_version "ffmpeg" "$FINAL_VERSION"
  ensure_usr_local_bin_persist
  [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION"
}

# ------------------------------------------------------------------------------
# Installs Go (Golang) from official tarball.
#
# Description:
# - Determines system architecture
# - Downloads latest version if GO_VERSION not set
#
# Variables:
# GO_VERSION - Version to install (e.g. 1.22.2 or latest)
# ------------------------------------------------------------------------------

function setup_go() {
  local ARCH
  case "$(uname -m)" in
  x86_64) ARCH="amd64" ;;
  aarch64) ARCH="arm64" ;;
  *)
    msg_error "Unsupported architecture: $(uname -m)"
    return 1
    ;;
  esac

  # Resolve "latest" version via go.dev's plain-text version endpoint
  local GO_VERSION="${GO_VERSION:-latest}"
  if [[ "$GO_VERSION" == "latest" ]]; then
    local go_version_tmp
    go_version_tmp=$(curl_with_retry "https://go.dev/VERSION?m=text" "-" 2>/dev/null | head -n1 | sed 's/^go//') || true
    if [[ -z "$go_version_tmp" ]]; then
      msg_error "Could not determine latest Go version"
      return 1
    fi
    GO_VERSION="$go_version_tmp"
  fi

  local GO_BIN="/usr/local/bin/go"

  # Get currently installed version (empty if go is absent)
  local CURRENT_VERSION=""
  if [[ -x "$GO_BIN" ]]; then
    CURRENT_VERSION=$("$GO_BIN" version 2>/dev/null | awk '{print $3}' | sed 's/go//')
  fi

  # Scenario 1: Already at target version - nothing to do
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$GO_VERSION" ]]; then
    cache_installed_version "go" "$GO_VERSION"
    return 0
  fi

  # Scenario 2: Different version or not installed
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$GO_VERSION" ]]; then
    msg_info "Upgrade Go from $CURRENT_VERSION to $GO_VERSION"
    remove_old_tool_version "go"
  else
    msg_info "Setup Go $GO_VERSION"
  fi

  local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
  local URL="https://go.dev/dl/${TARBALL}"
  # Split declaration/assignment so a mktemp failure is not masked (SC2155).
  local TMP_TAR
  TMP_TAR=$(mktemp) || {
    msg_error "Failed to create temporary file"
    return 1
  }

  if ! CURL_TIMEOUT=300 curl_with_retry "$URL" "$TMP_TAR"; then
    msg_error "Failed to download Go $GO_VERSION"
    rm -f "$TMP_TAR"
    return 1
  fi

  $STD tar -C /usr/local -xzf "$TMP_TAR" || {
    msg_error "Failed to extract Go tarball"
    rm -f "$TMP_TAR"
    return 1
  }

  ln -sf /usr/local/go/bin/go /usr/local/bin/go
  ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
  rm -f "$TMP_TAR"

  cache_installed_version "go" "$GO_VERSION"
  ensure_usr_local_bin_persist
  msg_ok "Setup Go $GO_VERSION"
}

# ------------------------------------------------------------------------------
# Installs or updates Ghostscript (gs) from source.
#
# Description:
# - Fetches latest release
# - Builds and installs system-wide
# ------------------------------------------------------------------------------

function setup_gs() {
  # Split declaration/assignment so failures are not masked (SC2155).
  local TMP_DIR
  TMP_DIR=$(mktemp -d) || {
    msg_error "Failed to create temporary directory"
    return 1
  }
  local CURRENT_VERSION
  CURRENT_VERSION=$(gs --version 2>/dev/null || echo "0")

  ensure_dependencies jq

  local RELEASE_JSON
  RELEASE_JSON=$(curl -fsSL --max-time 15 https://api.github.com/repos/ArtifexSoftware/ghostpdl-downloads/releases/latest 2>/dev/null || echo "")

  if [[ -z "$RELEASE_JSON" ]]; then
    msg_warn "Cannot fetch latest Ghostscript version from GitHub API"
    # Fall back to the existing installation if there is one.
    if command -v gs &>/dev/null; then
      gs --version | head -n1
      cache_installed_version "ghostscript" "$CURRENT_VERSION"
      rm -rf "$TMP_DIR" # fix: tempdir was leaked on this path
      return 0
    fi
    msg_error "Cannot determine Ghostscript version and no existing installation found"
    rm -rf "$TMP_DIR" # fix: tempdir was leaked on this path
    return 1
  fi
  # Tag looks like gs10031; release name carries the dotted version (10.03.1).
  local LATEST_VERSION
  LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^gs//')
  local LATEST_VERSION_DOTTED
  LATEST_VERSION_DOTTED=$(echo "$RELEASE_JSON" | jq -r '.name' | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+')

  if [[ -z "$LATEST_VERSION" || -z "$LATEST_VERSION_DOTTED" ]]; then
    msg_warn "Could not determine latest Ghostscript version from GitHub - checking system"
    # Fallback: try to use system version or return error
    if [[ "$CURRENT_VERSION" == "0" ]]; then
      msg_error "Ghostscript not installed and cannot determine latest version"
      rm -rf "$TMP_DIR"
      return 1
    fi
    rm -rf "$TMP_DIR"
    return 0
  fi

  # Scenario 1: Already at (or beyond) the latest version
  if [[ -n "$LATEST_VERSION_DOTTED" ]] && dpkg --compare-versions "$CURRENT_VERSION" ge "$LATEST_VERSION_DOTTED" 2>/dev/null; then
    cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED"
    rm -rf "$TMP_DIR"
    return 0
  fi

  # Scenario 2: New install or upgrade
  if [[ "$CURRENT_VERSION" != "0" && "$CURRENT_VERSION" != "$LATEST_VERSION_DOTTED" ]]; then
    msg_info "Upgrade Ghostscript from $CURRENT_VERSION to $LATEST_VERSION_DOTTED"
  else
    msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED"
  fi

  if ! CURL_TIMEOUT=180 curl_with_retry "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" "$TMP_DIR/ghostscript.tar.gz"; then
    msg_error "Failed to download Ghostscript"
    rm -rf "$TMP_DIR"
    return 1
  fi

  if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then
    msg_error "Failed to extract Ghostscript archive"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # Verify directory exists before entering it
  if [[ ! -d "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" ]]; then
    msg_error "Ghostscript source directory not found: $TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}"
    rm -rf "$TMP_DIR"
    return 1
  fi

  ensure_dependencies build-essential libpng-dev zlib1g-dev

  # Build in a subshell: the original cd'd into $TMP_DIR and later removed it,
  # leaving the caller stranded in a deleted working directory.
  (
    cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || {
      msg_error "Failed to enter Ghostscript source directory"
      exit 1
    }
    $STD ./configure || {
      msg_error "Ghostscript configure failed"
      exit 1
    }
    $STD make -j"$(nproc)" || {
      msg_error "Ghostscript compilation failed"
      exit 1
    }
    $STD make install || {
      msg_error "Ghostscript installation failed"
      exit 1
    }
  ) || {
    rm -rf "$TMP_DIR"
    return 1
  }

  # Make sure the new binary is reachable; symlink if PATH lookup misses it.
  hash -r
  if [[ ! -x "$(command -v gs)" ]]; then
    if [[ -x /usr/local/bin/gs ]]; then
      ln -sf /usr/local/bin/gs /usr/bin/gs
    fi
  fi

  rm -rf "$TMP_DIR"
  cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED"
  ensure_usr_local_bin_persist
  msg_ok "Setup Ghostscript $LATEST_VERSION_DOTTED"
}

# ------------------------------------------------------------------------------
# Sets up Hardware Acceleration on debian or ubuntu.
#
# Description:
# - Detects all available GPUs (Intel, AMD, NVIDIA)
# - Allows user to select which GPU(s) to configure (with 60s timeout)
# - Installs the correct libraries and packages for each GPU type
# - Supports: Debian 11/12/13, Ubuntu 22.04/24.04
#   - Intel: Legacy (Gen 6-8), Modern (Gen 9+), Arc
#   - AMD: Discrete GPUs, APUs, ROCm compute
#   - NVIDIA: Version-matched drivers from CUDA repository
#
# Notes:
# - Some Intel packages are fetched from GitHub due to missing Debian packages
# - NVIDIA requires matching host driver version
# - NOTE(review): relies on msg_info/msg_ok/msg_warn/msg_custom, $STD and
#   cache_installed_version being provided by the sourced core/build funcs —
#   confirm they are in scope before calling this standalone.
# ------------------------------------------------------------------------------
function setup_hwaccel() {
  # Check if user explicitly disabled GPU in advanced settings
  # ENABLE_GPU is exported from build.func; unset is treated as "no".
  if [[ "${ENABLE_GPU:-no}" == "no" ]]; then
    return 0
  fi

  # Check if GPU passthrough is enabled (device nodes must exist)
  # /dev/dri = Intel/AMD render nodes, /dev/nvidia0 = NVIDIA, /dev/kfd = AMD compute
  if [[ ! -d /dev/dri && ! -e /dev/nvidia0 && ! -e /dev/kfd ]]; then
    msg_warn "No GPU passthrough detected (/dev/dri, /dev/nvidia*, /dev/kfd not found) - skipping hardware acceleration setup"
    return 0
  fi

  msg_info "Setup Hardware Acceleration"

  # Install pciutils if needed (provides lspci for GPU detection below);
  # a failure here is non-fatal for the caller — we just skip hwaccel setup.
  if ! command -v lspci &>/dev/null; then
    $STD apt -y update || {
      msg_warn "Failed to update package list"
      return 0
    }
    $STD apt -y install pciutils || {
      msg_warn "Failed to install pciutils"
      return 0
    }
  fi

  # ═══════════════════════════════════════════════════════════════════════════
  # GPU Detection - Build list of all available GPUs with details
  # ═══════════════════════════════════════════════════════════════════════════
  # Parallel arrays indexed together: PCI address / type tag / human name.
  # NOTE(review): GPU_LIST (PCI addresses) is collected but never read by the
  # configuration loop below — only GPU_TYPES/GPU_NAMES are consumed.
  local -a GPU_LIST=()
  local -a GPU_TYPES=()
  local -a GPU_NAMES=()
  local gpu_count=0

  # Get all GPU entries from lspci
  while IFS= read -r line; do
    [[ -z "$line" ]] && continue
    local pci_addr gpu_name gpu_type=""

    pci_addr=$(echo "$line" | awk '{print $1}')
    gpu_name=$(echo "$line" | sed 's/^[^ ]* [^:]*: //')

    # Determine GPU type
    # Note: Use -w (word boundary) for ATI to avoid matching "CorporATIon"
    if echo "$gpu_name" | grep -qi 'Intel'; then
      gpu_type="INTEL"
      # Subtype detection for Intel
      # Order matters: Check Arc first, then Gen9+ (UHD/Iris/HD 5xx-6xx), then Legacy (HD 2xxx-5xxx)
      # HD Graphics 530/630 = Gen 9 (Skylake/Kaby Lake) - 3 digits
      # HD Graphics 4600/5500 = Gen 7-8 (Haswell/Broadwell) - 4 digits starting with 2-5
      if echo "$gpu_name" | grep -qiE 'Arc|DG[12]'; then
        gpu_type="INTEL_ARC"
      elif echo "$gpu_name" | grep -qiE 'UHD|Iris|HD Graphics [5-6][0-9]{2}[^0-9]|HD Graphics [5-6][0-9]{2}$'; then
        # HD Graphics 5xx/6xx (3 digits) = Gen 9+ (Skylake onwards)
        gpu_type="INTEL_GEN9+"
      elif echo "$gpu_name" | grep -qiE 'HD Graphics [2-5][0-9]{3}'; then
        # HD Graphics 2xxx-5xxx (4 digits) = Gen 6-8 Legacy
        gpu_type="INTEL_LEGACY"
      fi
    elif echo "$gpu_name" | grep -qiwE 'AMD|ATI|Radeon|Advanced Micro Devices'; then
      gpu_type="AMD"
    elif echo "$gpu_name" | grep -qi 'NVIDIA'; then
      gpu_type="NVIDIA"
    fi

    if [[ -n "$gpu_type" ]]; then
      GPU_LIST+=("$pci_addr")
      GPU_TYPES+=("$gpu_type")
      GPU_NAMES+=("$gpu_name")
      # '|| true' guards set -e: ((x++)) returns 1 when x was 0
      ((gpu_count++)) || true
    fi
  done < <(lspci 2>/dev/null | grep -Ei 'vga|3d|display')

  # Check for AMD APU via CPU vendor if no discrete GPU found
  # (APUs may expose /dev/dri without a matching lspci VGA entry in a container)
  local cpu_vendor
  cpu_vendor=$(lscpu 2>/dev/null | grep -i 'Vendor ID' | awk '{print $3}' 2>/dev/null || echo "")

  if [[ $gpu_count -eq 0 ]]; then
    if [[ "$cpu_vendor" == "AuthenticAMD" ]]; then
      GPU_LIST+=("integrated")
      GPU_TYPES+=("AMD_APU")
      GPU_NAMES+=("AMD APU (Integrated Graphics)")
      ((gpu_count++)) || true
    else
      msg_warn "No GPU detected - skipping hardware acceleration setup"
      return 0
    fi
  fi

  # ═══════════════════════════════════════════════════════════════════════════
  # GPU Selection - Let user choose which GPU(s) to configure
  # ═══════════════════════════════════════════════════════════════════════════
  local -a SELECTED_INDICES=()
  local install_nvidia_drivers="yes"

  if [[ $gpu_count -eq 1 ]]; then
    # Single GPU - auto-select
    SELECTED_INDICES=(0)
    msg_ok "Detected GPU: ${GPU_NAMES[0]} (${GPU_TYPES[0]})"
  else
    # Multiple GPUs - show selection menu
    echo ""
    msg_custom "⚠" "${YW}" "Multiple GPUs detected:"
    echo ""
    for i in "${!GPU_LIST[@]}"; do
      local type_display="${GPU_TYPES[$i]}"
      case "${GPU_TYPES[$i]}" in
        INTEL_ARC) type_display="Intel Arc" ;;
        INTEL_GEN9+) type_display="Intel Gen9+" ;;
        INTEL_LEGACY) type_display="Intel Legacy" ;;
        INTEL) type_display="Intel" ;;
        AMD) type_display="AMD" ;;
        AMD_APU) type_display="AMD APU" ;;
        NVIDIA) type_display="NVIDIA" ;;
      esac
      printf " %d) [%s] %s\n" "$((i + 1))" "$type_display" "${GPU_NAMES[$i]}"
    done
    printf " A) Configure ALL GPUs\n"
    echo ""

    # Read with 60 second timeout; on timeout we fall through to "all GPUs"
    # so unattended installs still complete.
    local selection=""
    echo -n "Select GPU(s) to configure (1-${gpu_count}, A=all) [timeout 60s, default=all]: "
    if read -r -t 60 selection; then
      selection="${selection^^}" # uppercase
    else
      echo ""
      msg_info "Timeout - configuring all GPUs automatically"
      selection="A"
    fi

    # Parse selection: "A"/empty = all, "1,3" = specific indices, anything else = all
    if [[ "$selection" == "A" || -z "$selection" ]]; then
      # Select all
      for i in "${!GPU_LIST[@]}"; do
        SELECTED_INDICES+=("$i")
      done
    elif [[ "$selection" =~ ^[0-9,]+$ ]]; then
      # Parse comma-separated numbers; out-of-range entries are silently dropped
      IFS=',' read -ra nums <<<"$selection"
      for num in "${nums[@]}"; do
        num=$(echo "$num" | tr -d ' ')
        if [[ "$num" =~ ^[0-9]+$ ]] && ((num >= 1 && num <= gpu_count)); then
          SELECTED_INDICES+=("$((num - 1))")
        fi
      done
    else
      # Invalid - default to all
      msg_warn "Invalid selection - configuring all GPUs"
      for i in "${!GPU_LIST[@]}"; do
        SELECTED_INDICES+=("$i")
      done
    fi
  fi

  # Ask whether to install NVIDIA drivers in the container
  # (only when an NVIDIA GPU is among the selected devices)
  local nvidia_selected="no"
  for idx in "${SELECTED_INDICES[@]}"; do
    if [[ "${GPU_TYPES[$idx]}" == "NVIDIA" ]]; then
      nvidia_selected="yes"
      break
    fi
  done

  if [[ "$nvidia_selected" == "yes" ]]; then
    # INSTALL_NVIDIA_DRIVERS env var (if set) overrides the interactive prompt
    if [[ -n "${INSTALL_NVIDIA_DRIVERS:-}" ]]; then
      install_nvidia_drivers="${INSTALL_NVIDIA_DRIVERS}"
    else
      echo ""
      msg_custom "🎮" "${GN}" "NVIDIA GPU passthrough detected"
      local nvidia_reply=""
      # Default answer is "yes" both on timeout and on any non-"no" input
      read -r -t 60 -p "${TAB3}⚙️ Install NVIDIA driver libraries in the container? [Y/n] (auto-yes in 60s): " nvidia_reply || nvidia_reply=""
      case "${nvidia_reply,,}" in
        n | no) install_nvidia_drivers="no" ;;
        *) install_nvidia_drivers="yes" ;;
      esac
    fi
  fi

  # ═══════════════════════════════════════════════════════════════════════════
  # OS Detection
  # ═══════════════════════════════════════════════════════════════════════════
  local os_id os_codename os_version
  os_id=$(grep -oP '(?<=^ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || echo "debian")
  os_codename=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release 2>/dev/null | tr -d '"' || echo "unknown")
  os_version=$(grep -oP '(?<=^VERSION_ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || echo "")
  [[ -z "$os_id" ]] && os_id="debian"

  # CTTYPE: container type flag from the calling environment
  # NOTE(review): presumably 0 = privileged, 1 = unprivileged (Proxmox
  # convention) — confirm against build.func before relying on it.
  local in_ct="${CTTYPE:-0}"

  # ═══════════════════════════════════════════════════════════════════════════
  # Process Selected GPUs
  # ═══════════════════════════════════════════════════════════════════════════
  for idx in "${SELECTED_INDICES[@]}"; do
    local gpu_type="${GPU_TYPES[$idx]}"
    local gpu_name="${GPU_NAMES[$idx]}"

    msg_info "Configuring: ${gpu_name}"

    case "$gpu_type" in
      # ─────────────────────────────────────────────────────────────────────────
      # Intel Arc GPUs (DG1, DG2, Arc A-series)
      # ─────────────────────────────────────────────────────────────────────────
      INTEL_ARC)
        _setup_intel_arc "$os_id" "$os_codename"
        ;;

      # ─────────────────────────────────────────────────────────────────────────
      # Intel Gen 9+ (Skylake 2015+: UHD, Iris, HD 6xx+)
      # ─────────────────────────────────────────────────────────────────────────
      INTEL_GEN9+ | INTEL)
        _setup_intel_modern "$os_id" "$os_codename"
        ;;

      # ─────────────────────────────────────────────────────────────────────────
      # Intel Legacy (Gen 6-8: HD 2000-5999, Sandy Bridge to Broadwell)
      # ─────────────────────────────────────────────────────────────────────────
      INTEL_LEGACY)
        _setup_intel_legacy "$os_id" "$os_codename"
        ;;

      # ─────────────────────────────────────────────────────────────────────────
      # AMD Discrete GPUs
      # ─────────────────────────────────────────────────────────────────────────
      AMD)
        _setup_amd_gpu "$os_id" "$os_codename"
        ;;

      # ─────────────────────────────────────────────────────────────────────────
      # AMD APU (Integrated Graphics)
      # ─────────────────────────────────────────────────────────────────────────
      AMD_APU)
        _setup_amd_apu "$os_id" "$os_codename"
        ;;

      # ─────────────────────────────────────────────────────────────────────────
      # NVIDIA GPUs
      # ─────────────────────────────────────────────────────────────────────────
      NVIDIA)
        if [[ "$install_nvidia_drivers" == "yes" ]]; then
          _setup_nvidia_gpu "$os_id" "$os_codename" "$os_version"
        else
          msg_warn "Skipping NVIDIA driver installation (user opted to install manually)"
        fi
        ;;
    esac
  done

  # ═══════════════════════════════════════════════════════════════════════════
  # Device Permissions
  # ═══════════════════════════════════════════════════════════════════════════
  _setup_gpu_permissions "$in_ct"

  cache_installed_version "hwaccel" "1.0"
  msg_ok "Setup Hardware Acceleration"
}

# ══════════════════════════════════════════════════════════════════════════════
# Intel Arc GPU Setup
# Installs VA-API/OpenCL userspace for Arc/DG1/DG2 on Ubuntu and Debian.
# On Debian trixie/sid, compute-runtime/IGC debs are pulled from GitHub.
# ══════════════════════════════════════════════════════════════════════════════
_setup_intel_arc() {
  local os_id="$1" os_codename="$2"

  msg_info "Installing Intel Arc GPU drivers"

  if [[ "$os_id" == "ubuntu" ]]; then
    # Ubuntu 22.04+ has Arc support in HWE kernel
    $STD apt -y install \
      intel-media-va-driver-non-free \
      intel-opencl-icd \
      libmfx-gen1.2 \
      vainfo \
      intel-gpu-tools 2>/dev/null || msg_warn "Some Intel Arc packages failed"

  elif [[ "$os_id" == "debian" ]]; then
    # Add non-free repos
    _add_debian_nonfree "$os_codename"

    # For Trixie/Sid: Fetch latest drivers from GitHub (Debian repo packages may be too old or missing)
    # For Bookworm: Use repo packages (GitHub latest requires libstdc++6 >= 13.1, unavailable on Bookworm)
    if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then
      msg_info "Fetching Intel compute-runtime from GitHub for Arc support"

      # libigdgmm - bundled in compute-runtime releases
      fetch_and_deploy_gh_release "libigdgmm12" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || true

      # Intel Graphics Compiler (note: packages have -2 suffix)
      fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || true
      fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || true

      # Compute Runtime (depends on IGC and gmmlib)
      fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || true
      fetch_and_deploy_gh_release "intel-level-zero-gpu" "intel/compute-runtime" "binary" "latest" "" "libze-intel-gpu1_*_amd64.deb" || true
    fi

    $STD apt -y install \
      intel-media-va-driver-non-free \
      ocl-icd-libopencl1 \
      libvpl2 \
      libmfx-gen1.2 \
      vainfo \
      intel-gpu-tools 2>/dev/null || msg_warn "Some Intel Arc packages failed"

    # Bookworm has compatible versions of these packages in repos
    [[ "$os_codename" == "bookworm" ]] && $STD apt -y install intel-opencl-icd libigdgmm12 2>/dev/null || true
  fi

  msg_ok "Intel Arc GPU configured"
}

# ══════════════════════════════════════════════════════════════════════════════
# Intel Modern GPU Setup (Gen 9+)
# Installs VA-API/OpenCL userspace for Skylake-and-newer integrated graphics.
# ══════════════════════════════════════════════════════════════════════════════
_setup_intel_modern() {
  local os_id="$1" os_codename="$2"

  msg_info "Installing Intel Gen 9+ GPU drivers"

  if [[ "$os_id" == "ubuntu" ]]; then
    $STD apt -y install \
      va-driver-all \
      intel-media-va-driver \
      ocl-icd-libopencl1 \
      vainfo \
      intel-gpu-tools 2>/dev/null || msg_warn "Some Intel packages failed"

    # Try non-free driver for better codec support; each install is optional
    $STD apt -y install intel-media-va-driver-non-free 2>/dev/null || true
    $STD apt -y install intel-opencl-icd 2>/dev/null || true
    $STD apt -y install libmfx-gen1.2 2>/dev/null || true

  elif [[ "$os_id" == "debian" ]]; then
    _add_debian_nonfree "$os_codename"

    # For Trixie/Sid: Fetch from GitHub (Debian packages too old or missing)
    if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then
      msg_info "Fetching Intel compute-runtime from GitHub"

      # libigdgmm first (bundled in compute-runtime releases)
      fetch_and_deploy_gh_release "libigdgmm12" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || true

      # Intel Graphics Compiler (note: packages have -2 suffix)
      fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || true
      fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || true

      # Compute Runtime
      fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || true
    fi

    $STD apt -y install \
      intel-media-va-driver-non-free \
      ocl-icd-libopencl1 \
      vainfo \
      libmfx-gen1.2 \
      intel-gpu-tools 2>/dev/null || msg_warn "Some Intel packages failed"

    # Bookworm has intel-opencl-icd in repos (compatible version)
    [[ "$os_codename" == "bookworm" ]] && $STD apt -y install intel-opencl-icd libigdgmm12 2>/dev/null || true
  fi

  msg_ok "Intel Gen 9+ GPU configured"
}

# ══════════════════════════════════════════════════════════════════════════════
# Intel Legacy GPU Setup (Gen 6-8)
# Sandy Bridge through Broadwell; uses the i965 VA-API driver from the repos.
# ══════════════════════════════════════════════════════════════════════════════
_setup_intel_legacy() {
  local os_id="$1" os_codename="$2"

  msg_info "Installing Intel Legacy GPU drivers (Gen 6-8)"

  # Legacy GPUs use i965 driver - stable repo packages only
  $STD apt -y install \
    va-driver-all \
    i965-va-driver \
    mesa-va-drivers \
    ocl-icd-libopencl1 \
    vainfo \
    intel-gpu-tools 2>/dev/null || msg_warn "Some Intel legacy packages failed"

  # beignet provides OpenCL for older Intel GPUs (Sandy Bridge to Broadwell)
  # Note: beignet-opencl-icd was removed in Debian 12+ and Ubuntu 22.04+
  # Check if package is available before attempting installation
  if apt-cache show beignet-opencl-icd &>/dev/null; then
    $STD apt -y install beignet-opencl-icd 2>/dev/null || msg_warn "beignet-opencl-icd installation failed (optional)"
  else
    msg_warn "beignet-opencl-icd not available - OpenCL support for legacy Intel GPU limited"
    msg_warn "Note: Hardware video encoding/decoding (VA-API) still works without OpenCL"
  fi

  msg_ok "Intel Legacy GPU configured"
}

# ══════════════════════════════════════════════════════════════════════════════
# AMD Discrete GPU Setup
# Mesa VA-API/VDPAU/OpenCL plus firmware (Debian) and the ROCm compute stack.
# ══════════════════════════════════════════════════════════════════════════════
_setup_amd_gpu() {
  local os_id="$1" os_codename="$2"

  msg_info "Installing AMD GPU drivers"

  # Core Mesa drivers
  $STD apt -y install \
    mesa-va-drivers \
    mesa-vdpau-drivers \
    mesa-opencl-icd \
    ocl-icd-libopencl1 \
    libdrm-amdgpu1 \
    vainfo \
    clinfo 2>/dev/null || msg_warn "Some AMD packages failed"

  # Firmware for AMD GPUs
  if [[ "$os_id" == "debian" ]]; then
    _add_debian_nonfree_firmware "$os_codename"
    $STD apt -y install firmware-amd-graphics 2>/dev/null || msg_warn "AMD firmware not available"
  fi
  # Ubuntu includes AMD firmware in linux-firmware by default

  # ROCm compute stack (OpenCL + HIP)
  _setup_rocm "$os_id" "$os_codename"

  msg_ok "AMD GPU configured"
}

# ══════════════════════════════════════════════════════════════════════════════
# AMD APU Setup (Integrated Graphics)
# Mesa userspace only; firmware from non-free-firmware on Debian.
# ══════════════════════════════════════════════════════════════════════════════
_setup_amd_apu() {
  local os_id="$1" os_codename="$2"
+ msg_info "Installing AMD APU drivers" + + $STD apt -y install \ + mesa-va-drivers \ + mesa-vdpau-drivers \ + mesa-opencl-icd \ + ocl-icd-libopencl1 \ + vainfo 2>/dev/null || msg_warn "Some AMD APU packages failed" + + if [[ "$os_id" == "debian" ]]; then + _add_debian_nonfree_firmware "$os_codename" + $STD apt -y install firmware-amd-graphics 2>/dev/null || true + fi + + msg_ok "AMD APU configured" +} + +# ══════════════════════════════════════════════════════════════════════════════ +# AMD ROCm Compute Setup +# Adds ROCm repository and installs the ROCm compute stack for AMD GPUs/APUs. +# Provides: OpenCL, HIP, rocm-smi, rocminfo +# Supported: Debian 12/13, Ubuntu 22.04/24.04 (amd64 only) +# ══════════════════════════════════════════════════════════════════════════════ +_setup_rocm() { + local os_id="$1" os_codename="$2" + + # Only amd64 is supported + if [[ "$(dpkg --print-architecture 2>/dev/null)" != "amd64" ]]; then + msg_warn "ROCm is only available for amd64 — skipping" + return 0 + fi + + local ROCM_VERSION="7.2" + local ROCM_REPO_CODENAME + + # Map OS codename to ROCm repository codename (Ubuntu-based repos) + case "${os_id}-${os_codename}" in + debian-bookworm) ROCM_REPO_CODENAME="jammy" ;; + debian-trixie | debian-sid) ROCM_REPO_CODENAME="noble" ;; + ubuntu-jammy) ROCM_REPO_CODENAME="jammy" ;; + ubuntu-noble) ROCM_REPO_CODENAME="noble" ;; + *) + msg_warn "ROCm not supported on ${os_id} ${os_codename} — skipping" + return 0 + ;; + esac + + msg_info "Installing ROCm ${ROCM_VERSION} compute stack" + + # ROCm main repository (userspace compute libs) + setup_deb822_repo \ + "rocm" \ + "https://repo.radeon.com/rocm/rocm.gpg.key" \ + "https://repo.radeon.com/rocm/apt/${ROCM_VERSION}" \ + "${ROCM_REPO_CODENAME}" \ + "main" \ + "amd64" || { + msg_warn "Failed to add ROCm repository — skipping ROCm" + return 0 + } + + # AMDGPU driver repository (append to same keyring) + { + echo "" + echo "Types: deb" + echo "URIs: https://repo.radeon.com/amdgpu/latest/ubuntu" + 
echo "Suites: ${ROCM_REPO_CODENAME}" + echo "Components: main" + echo "Architectures: amd64" + echo "Signed-By: /etc/apt/keyrings/rocm.gpg" + } >>/etc/apt/sources.list.d/rocm.sources + + # Pin ROCm packages to prefer radeon repo + cat </etc/apt/preferences.d/rocm-pin-600 +Package: * +Pin: release o=repo.radeon.com +Pin-Priority: 600 +EOF + + $STD apt update || msg_warn "apt update failed (AMD repo may be temporarily unavailable) — continuing anyway" + # Install only runtime packages — full 'rocm' meta-package includes 15GB+ dev tools + $STD apt install -y rocm-opencl-runtime rocm-hip-runtime rocm-smi-lib 2>/dev/null || { + msg_warn "ROCm runtime install failed — trying minimal set" + $STD apt install -y rocm-opencl-runtime rocm-smi-lib 2>/dev/null || msg_warn "ROCm minimal install also failed" + } + + # Group membership for GPU access + usermod -aG render,video root 2>/dev/null || true + + # Environment (PATH + LD_LIBRARY_PATH) + if [[ -d /opt/rocm ]]; then + cat <<'ENVEOF' >/etc/profile.d/rocm.sh +export PATH="$PATH:/opt/rocm/bin" +export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}/opt/rocm/lib" +ENVEOF + chmod +x /etc/profile.d/rocm.sh + # Also make available for current session / systemd services + echo "/opt/rocm/lib" >/etc/ld.so.conf.d/rocm.conf + ldconfig 2>/dev/null || true + fi + + if [[ -x /opt/rocm/bin/rocminfo ]]; then + msg_ok "ROCm ${ROCM_VERSION} installed" + else + msg_warn "ROCm installed but rocminfo not found — GPU may not be available in container" + fi +} + +# ══════════════════════════════════════════════════════════════════════════════ +# NVIDIA GPU Setup +# ══════════════════════════════════════════════════════════════════════════════ +_setup_nvidia_gpu() { + local os_id="$1" os_codename="$2" os_version="$3" + + msg_info "Installing NVIDIA GPU drivers" + + # Prevent interactive dialogs (e.g., "Mismatching nvidia kernel module" whiptail) + export DEBIAN_FRONTEND=noninteractive + export NEEDRESTART_MODE=a + + # Detect host driver 
version (passed through via /proc) + # Format varies by driver type: + # Proprietary: "NVRM version: NVIDIA UNIX x86_64 Kernel Module 550.54.14 Thu..." + # Open: "NVRM version: NVIDIA UNIX Open Kernel Module for x86_64 590.48.01 Release..." + # Use regex to extract version number (###.##.## or ###.## pattern) + local nvidia_host_version="" + if [[ -f /proc/driver/nvidia/version ]]; then + nvidia_host_version=$(grep -oP '\d{3,}\.\d+(\.\d+)?' /proc/driver/nvidia/version 2>/dev/null | head -1) + fi + + if [[ -z "$nvidia_host_version" ]]; then + msg_warn "NVIDIA host driver version not found in /proc/driver/nvidia/version" + msg_warn "Ensure NVIDIA drivers are installed on host and GPU passthrough is enabled" + $STD apt-get -y install va-driver-all vainfo 2>/dev/null || true + return 0 + fi + + msg_info "Host NVIDIA driver version: ${nvidia_host_version}" + + if [[ "$os_id" == "debian" ]]; then + # Enable non-free components + if [[ -f /etc/apt/sources.list.d/debian.sources ]]; then + if ! grep -q "non-free" /etc/apt/sources.list.d/debian.sources 2>/dev/null; then + sed -i -E 's/Components: (.*)$/Components: \1 contrib non-free non-free-firmware/g' /etc/apt/sources.list.d/debian.sources 2>/dev/null || true + fi + fi + $STD apt-get -y update 2>/dev/null || msg_warn "apt update failed - continuing anyway" + + # For Debian 13 Trixie/Sid: Use Debian's own nvidia packages first (better compatibility) + # NVIDIA's CUDA repo targets Debian 12 and may not have amd64 packages for Trixie + if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then + msg_info "Debian ${os_codename}: Using Debian's NVIDIA packages" + + # Extract major version for flexible matching (580.126.09 -> 580) + local nvidia_major_version="${nvidia_host_version%%.*}" + + # Check what versions are actually available + local available_version="" + available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | grep -E "^${nvidia_major_version}\." 
| head -1 || true) + + if [[ -n "$available_version" ]]; then + msg_info "Found available NVIDIA version: ${available_version}" + local nvidia_pkgs="libcuda1=${available_version} libnvcuvid1=${available_version} libnvidia-encode1=${available_version} libnvidia-ml1=${available_version}" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then + msg_ok "Installed NVIDIA libraries (${available_version})" + else + msg_warn "Failed to install NVIDIA ${available_version} - trying unversioned" + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null || true + fi + else + # No matching major version - try latest available or unversioned + msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x found in repos" + available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | head -1 || true) + if [[ -n "$available_version" ]]; then + msg_info "Trying latest available: ${available_version} (may cause version mismatch)" + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libcuda1="${available_version}" libnvcuvid1="${available_version}" \ + libnvidia-encode1="${available_version}" libnvidia-ml1="${available_version}" 2>/dev/null || + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null || + msg_warn "NVIDIA library installation failed - GPU compute may not work" + else + msg_warn "No NVIDIA packages available in Debian repos - GPU support disabled" + fi + fi + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends nvidia-smi 2>/dev/null || true + + else + # Debian 11/12: Use NVIDIA CUDA repository for version matching + local cuda_repo="debian12" + case "$os_codename" in + bullseye) cuda_repo="debian11" ;; + bookworm) 
cuda_repo="debian12" ;; + esac + + # Add NVIDIA CUDA repository + if [[ ! -f /usr/share/keyrings/cuda-archive-keyring.gpg ]]; then + msg_info "Adding NVIDIA CUDA repository (${cuda_repo})" + local cuda_keyring + cuda_keyring="$(mktemp)" + if curl -fsSL -o "$cuda_keyring" "https://developer.download.nvidia.com/compute/cuda/repos/${cuda_repo}/x86_64/cuda-keyring_1.1-1_all.deb" 2>/dev/null; then + $STD dpkg -i "$cuda_keyring" 2>/dev/null || true + else + msg_warn "Failed to download NVIDIA CUDA keyring" + fi + rm -f "$cuda_keyring" + fi + + # Pin NVIDIA repo for version matching + cat <<'NVIDIA_PIN' >/etc/apt/preferences.d/nvidia-cuda-pin +Package: * +Pin: origin developer.download.nvidia.com +Pin-Priority: 1001 +NVIDIA_PIN + + $STD apt-get -y update 2>/dev/null || msg_warn "apt update failed - continuing anyway" + + # Extract major version for flexible matching (580.126.09 -> 580) + local nvidia_major_version="${nvidia_host_version%%.*}" + + # Check what versions are actually available in CUDA repo + local available_version="" + available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | grep -E "^${nvidia_major_version}\." 
| head -1 || true) + + if [[ -n "$available_version" ]]; then + msg_info "Installing NVIDIA libraries (version ${available_version})" + local nvidia_pkgs="libcuda1=${available_version} libnvcuvid1=${available_version} libnvidia-encode1=${available_version} libnvidia-ml1=${available_version}" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then + msg_ok "Installed version-matched NVIDIA libraries" + else + msg_warn "Version-pinned install failed - trying unpinned" + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null || + msg_warn "NVIDIA library installation failed" + fi + else + msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x in CUDA repo (host: ${nvidia_host_version})" + # Try latest available version + available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | head -1 || true) + if [[ -n "$available_version" ]]; then + msg_info "Trying latest available: ${available_version} (version mismatch warning)" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libcuda1="${available_version}" libnvcuvid1="${available_version}" \ + libnvidia-encode1="${available_version}" libnvidia-ml1="${available_version}" 2>/dev/null; then + msg_ok "Installed NVIDIA libraries (${available_version}) - version differs from host" + else + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null || + msg_warn "NVIDIA library installation failed" + fi + else + msg_warn "No NVIDIA packages available in CUDA repo - GPU support disabled" + fi + fi + + $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends nvidia-smi 2>/dev/null || true + fi + + elif [[ "$os_id" == "ubuntu" ]]; then + # Ubuntu versioning + local 
ubuntu_cuda_repo="" + case "$os_version" in + 22.04) ubuntu_cuda_repo="ubuntu2204" ;; + 24.04) ubuntu_cuda_repo="ubuntu2404" ;; + *) ubuntu_cuda_repo="ubuntu2204" ;; # Fallback + esac + + # Add NVIDIA CUDA repository for Ubuntu + if [[ ! -f /usr/share/keyrings/cuda-archive-keyring.gpg ]]; then + msg_info "Adding NVIDIA CUDA repository (${ubuntu_cuda_repo})" + local cuda_keyring + cuda_keyring="$(mktemp)" + if curl -fsSL -o "$cuda_keyring" "https://developer.download.nvidia.com/compute/cuda/repos/${ubuntu_cuda_repo}/x86_64/cuda-keyring_1.1-1_all.deb" 2>/dev/null; then + $STD dpkg -i "$cuda_keyring" 2>/dev/null || true + else + msg_warn "Failed to download NVIDIA CUDA keyring" + fi + rm -f "$cuda_keyring" + fi + + $STD apt-get -y update 2>/dev/null || msg_warn "apt update failed - continuing anyway" + + # Extract major version for flexible matching + local nvidia_major_version="${nvidia_host_version%%.*}" + + # Check what versions are available + local available_version="" + available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | grep -E "^${nvidia_major_version}\." 
| head -1 || true) + + if [[ -n "$available_version" ]]; then + msg_info "Installing NVIDIA libraries (version ${available_version})" + local nvidia_pkgs="libcuda1=${available_version} libnvcuvid1=${available_version} libnvidia-encode1=${available_version} libnvidia-ml1=${available_version}" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then + msg_ok "Installed version-matched NVIDIA libraries" + else + # Fallback to Ubuntu repo packages with versioned nvidia-utils + msg_warn "CUDA repo install failed - trying Ubuntu native packages (nvidia-utils-${nvidia_major_version})" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libnvidia-decode-${nvidia_major_version} libnvidia-encode-${nvidia_major_version} nvidia-utils-${nvidia_major_version} 2>/dev/null; then + msg_ok "Installed Ubuntu NVIDIA packages (${nvidia_major_version})" + else + msg_warn "NVIDIA driver installation failed - please install manually: apt install nvidia-utils-${nvidia_major_version}" + fi + fi + else + msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x in CUDA repo" + # Fallback to Ubuntu repo packages with versioned nvidia-utils + msg_info "Trying Ubuntu native packages (nvidia-utils-${nvidia_major_version})" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libnvidia-decode-${nvidia_major_version} libnvidia-encode-${nvidia_major_version} nvidia-utils-${nvidia_major_version} 2>/dev/null; then + msg_ok "Installed Ubuntu NVIDIA packages (${nvidia_major_version})" + else + msg_warn "NVIDIA driver installation failed - please install manually: apt install nvidia-utils-${nvidia_major_version}" + fi + fi + fi + + # VA-API for hybrid setups (Intel + NVIDIA) + $STD apt-get -y install va-driver-all vainfo 2>/dev/null || true + + # Fix GLX alternatives: nvidia-alternative diverts mesa libs but in LXC + # containers the nvidia GLX libs 
are typically missing, leaving libGL.so.1 + # pointing nowhere. Fall back to mesa if nvidia GLX dir is empty/missing. + if command -v update-glx &>/dev/null; then + local nvidia_glx_dir="/usr/lib/nvidia" + if [[ ! -f "${nvidia_glx_dir}/libGL.so.1" ]] && [[ -d /usr/lib/mesa-diverted ]]; then + msg_info "NVIDIA GLX libs missing in container - falling back to mesa" + $STD update-glx --set glx /usr/lib/mesa-diverted 2>/dev/null || true + ldconfig 2>/dev/null || true + fi + fi + + msg_ok "NVIDIA GPU configured" +} + +# ══════════════════════════════════════════════════════════════════════════════ +# Helper: Add Debian non-free repositories +# ══════════════════════════════════════════════════════════════════════════════ +_add_debian_nonfree() { + local os_codename="$1" + + [[ -f /etc/apt/sources.list.d/non-free.sources ]] && return 0 + + case "$os_codename" in + bullseye) + cat <<'EOF' >/etc/apt/sources.list.d/non-free.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: bullseye bullseye-updates +Components: non-free +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg +EOF + ;; + bookworm) + cat <<'EOF' >/etc/apt/sources.list.d/non-free.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: bookworm bookworm-updates +Components: non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg +EOF + ;; + trixie | sid) + cat <<'EOF' >/etc/apt/sources.list.d/non-free.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: trixie trixie-updates +Components: non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + +Types: deb +URIs: http://deb.debian.org/debian-security +Suites: trixie-security +Components: non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg +EOF + ;; + esac + $STD apt -y update +} + +# ══════════════════════════════════════════════════════════════════════════════ +# Helper: Add Debian non-free-firmware repository +# 
══════════════════════════════════════════════════════════════════════════════ +_add_debian_nonfree_firmware() { + local os_codename="$1" + + [[ -f /etc/apt/sources.list.d/non-free-firmware.sources ]] && return 0 + + case "$os_codename" in + bullseye) + # Debian 11 uses 'non-free' component (no separate non-free-firmware) + cat <<'EOF' >/etc/apt/sources.list.d/non-free-firmware.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: bullseye bullseye-updates +Components: non-free +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + +Types: deb +URIs: http://deb.debian.org/debian-security +Suites: bullseye-security +Components: non-free +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg +EOF + ;; + bookworm) + cat <<'EOF' >/etc/apt/sources.list.d/non-free-firmware.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: bookworm bookworm-updates +Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + +Types: deb +URIs: http://deb.debian.org/debian-security +Suites: bookworm-security +Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg +EOF + ;; + trixie | sid) + cat <<'EOF' >/etc/apt/sources.list.d/non-free-firmware.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: trixie trixie-updates +Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + +Types: deb +URIs: http://deb.debian.org/debian-security +Suites: trixie-security +Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg +EOF + ;; + esac + $STD apt -y update +} + +# ══════════════════════════════════════════════════════════════════════════════ +# Helper: Setup GPU device permissions +# ══════════════════════════════════════════════════════════════════════════════ +_setup_gpu_permissions() { + local in_ct="$1" + + # /dev/dri permissions (Intel/AMD) + if [[ "$in_ct" == "0" && -d /dev/dri ]]; then + if ls /dev/dri/card* 
/dev/dri/renderD* &>/dev/null; then + chgrp video /dev/dri 2>/dev/null || true + chmod 755 /dev/dri 2>/dev/null || true + chmod 660 /dev/dri/* 2>/dev/null || true + $STD adduser "$(id -u -n)" video 2>/dev/null || true + $STD adduser "$(id -u -n)" render 2>/dev/null || true + + # Sync GID with host + local host_video_gid host_render_gid + host_video_gid=$(getent group video | cut -d: -f3) + host_render_gid=$(getent group render | cut -d: -f3) + if [[ -n "$host_video_gid" ]]; then + sed -i "s/^video:x:[0-9]*:/video:x:$host_video_gid:/" /etc/group 2>/dev/null || true + fi + if [[ -n "$host_render_gid" ]]; then + sed -i "s/^render:x:[0-9]*:/render:x:$host_render_gid:/" /etc/group 2>/dev/null || true + fi + + # Verify VA-API + if command -v vainfo &>/dev/null; then + if vainfo &>/dev/null; then + msg_info "VA-API verified and working" + else + msg_warn "vainfo test failed - check GPU passthrough" + fi + fi + fi + fi + + # /dev/nvidia* permissions (NVIDIA) + if ls /dev/nvidia* &>/dev/null 2>&1; then + msg_info "Configuring NVIDIA device permissions" + for nvidia_dev in /dev/nvidia*; do + [[ -e "$nvidia_dev" ]] && { + chgrp video "$nvidia_dev" 2>/dev/null || true + chmod 666 "$nvidia_dev" 2>/dev/null || true + } + done + if [[ -d /dev/nvidia-caps ]]; then + chmod 755 /dev/nvidia-caps 2>/dev/null || true + for caps_dev in /dev/nvidia-caps/*; do + [[ -e "$caps_dev" ]] && { + chgrp video "$caps_dev" 2>/dev/null || true + chmod 666 "$caps_dev" 2>/dev/null || true + } + done + fi + + # Verify nvidia-smi + if command -v nvidia-smi &>/dev/null; then + if nvidia-smi &>/dev/null; then + msg_info "nvidia-smi verified and working" + else + msg_warn "nvidia-smi test failed - check driver version match" + fi + fi + fi + + # /dev/kfd permissions (AMD ROCm) + if [[ -e /dev/kfd ]]; then + chmod 666 /dev/kfd 2>/dev/null || true + msg_info "AMD ROCm compute device configured" + fi +} + +# ------------------------------------------------------------------------------ +# Installs ImageMagick 
7 from source (Debian/Ubuntu only). +# +# Description: +# - Downloads the latest ImageMagick source tarball +# - Builds and installs ImageMagick to /usr/local +# - Configures dynamic linker (ldconfig) +# +# Notes: +# - Requires: build-essential, libtool, libjpeg-dev, libpng-dev, etc. +# ------------------------------------------------------------------------------ +function setup_imagemagick() { + local TMP_DIR=$(mktemp -d) + local BINARY_PATH="/usr/local/bin/magick" + + # Get currently installed version + local INSTALLED_VERSION="" + if command -v magick &>/dev/null; then + INSTALLED_VERSION=$(magick -version | awk '/^Version/ {print $3}') + fi + + msg_info "Setup ImageMagick" + + ensure_dependencies \ + build-essential \ + libtool \ + libjpeg-dev \ + libpng-dev \ + libtiff-dev \ + libwebp-dev \ + libheif-dev \ + libde265-dev \ + libopenjp2-7-dev \ + libxml2-dev \ + liblcms2-dev \ + libfreetype6-dev \ + libraw-dev \ + libfftw3-dev \ + liblqr-1-0-dev \ + libgsl-dev \ + pkg-config \ + ghostscript + + if ! CURL_TIMEOUT=180 curl_with_retry "https://imagemagick.org/archive/ImageMagick.tar.gz" "$TMP_DIR/ImageMagick.tar.gz"; then + msg_error "Failed to download ImageMagick" + rm -rf "$TMP_DIR" + return 1 + fi + + tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract ImageMagick" + rm -rf "$TMP_DIR" + return 1 + } + + cd "$TMP_DIR"/ImageMagick-* || { + msg_error "Source extraction failed" + rm -rf "$TMP_DIR" + return 1 + } + + $STD ./configure --disable-static || { + msg_error "ImageMagick configure failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make -j"$(nproc)" || { + msg_error "ImageMagick compilation failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make install || { + msg_error "ImageMagick installation failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD ldconfig /usr/local/lib + + if [[ ! 
-x "$BINARY_PATH" ]]; then + msg_error "ImageMagick installation failed" + rm -rf "$TMP_DIR" + return 1 + fi + + local FINAL_VERSION + FINAL_VERSION=$("$BINARY_PATH" -version | awk '/^Version/ {print $3}') + rm -rf "$TMP_DIR" + cache_installed_version "imagemagick" "$FINAL_VERSION" + ensure_usr_local_bin_persist + + if [[ -n "$INSTALLED_VERSION" ]]; then + msg_ok "Upgrade ImageMagick $INSTALLED_VERSION → $FINAL_VERSION" + else + msg_ok "Setup ImageMagick $FINAL_VERSION" + fi +} + +# ------------------------------------------------------------------------------ +# Installs Temurin JDK via Adoptium APT repository. +# +# Description: +# - Removes previous JDK if version mismatch +# - Installs or upgrades to specified JAVA_VERSION +# +# Variables: +# JAVA_VERSION - Temurin JDK version to install (e.g. 17, 21) +# ------------------------------------------------------------------------------ + +function setup_java() { + local JAVA_VERSION="${JAVA_VERSION:-21}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release) + local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk" + + # Prepare repository (cleanup + validation) + prepare_repository_setup "adoptium" || { + msg_error "Failed to prepare Adoptium repository" + return 1 + } + + # Add repo if needed + if [[ ! 
-f /etc/apt/sources.list.d/adoptium.sources ]]; then + local SUITE + SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://packages.adoptium.net/artifactory/deb") + setup_deb822_repo \ + "adoptium" \ + "https://packages.adoptium.net/artifactory/api/gpg/key/public" \ + "https://packages.adoptium.net/artifactory/deb" \ + "$SUITE" \ + "main" + fi + + # Get currently installed version + local INSTALLED_VERSION="" + if dpkg -l | grep -q "temurin-.*-jdk" 2>/dev/null; then + INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "") + fi + + # Validate INSTALLED_VERSION is not empty if JDK package found + local JDK_COUNT=0 + JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || true) + if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then + msg_warn "Found Temurin JDK but cannot determine version - attempting reinstall" + # Try to get actual package name for purge + local OLD_PACKAGE + OLD_PACKAGE=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | head -n1 || echo "") + if [[ -n "$OLD_PACKAGE" ]]; then + msg_info "Removing existing package: $OLD_PACKAGE" + $STD apt purge -y "$OLD_PACKAGE" || true + fi + INSTALLED_VERSION="" # Reset to trigger fresh install + fi + + # Scenario 1: Already at correct version + if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then + msg_info "Update Temurin JDK $JAVA_VERSION" + ensure_apt_working || return 1 + upgrade_packages_with_retry "$DESIRED_PACKAGE" || { + msg_error "Failed to update Temurin JDK" + return 1 + } + cache_installed_version "temurin-jdk" "$JAVA_VERSION" + msg_ok "Update Temurin JDK $JAVA_VERSION" + return 0 + fi + + # Scenario 2: Different version - remove old and install new + if [[ -n "$INSTALLED_VERSION" ]]; then + msg_info "Upgrade Temurin JDK from $INSTALLED_VERSION to $JAVA_VERSION" + $STD apt purge -y "temurin-${INSTALLED_VERSION}-jdk" || true + else + msg_info "Setup Temurin JDK $JAVA_VERSION" + fi + + 
ensure_apt_working || return 1 + + # Install with retry logic + install_packages_with_retry "$DESIRED_PACKAGE" || { + msg_error "Failed to install Temurin JDK $JAVA_VERSION" + return 1 + } + + cache_installed_version "temurin-jdk" "$JAVA_VERSION" + msg_ok "Setup Temurin JDK $JAVA_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs a local IP updater script using networkd-dispatcher. +# +# Description: +# - Stores current IP in /run/local-ip.env +# - Automatically runs on network changes +# ------------------------------------------------------------------------------ + +function setup_local_ip_helper() { + local BASE_DIR="/usr/local/community-scripts/ip-management" + local SCRIPT_PATH="$BASE_DIR/update_local_ip.sh" + local IP_FILE="/run/local-ip.env" + local DISPATCHER_SCRIPT="/etc/networkd-dispatcher/routable.d/10-update-local-ip.sh" + + # Check if already set up + if [[ -f "$SCRIPT_PATH" && -f "$DISPATCHER_SCRIPT" ]]; then + msg_info "Update Local IP Helper" + cache_installed_version "local-ip-helper" "1.0" + msg_ok "Update Local IP Helper" + else + msg_info "Setup Local IP Helper" + fi + + mkdir -p "$BASE_DIR" + + # Install networkd-dispatcher if not present + if ! 
dpkg -s networkd-dispatcher >/dev/null 2>&1; then
+    ensure_dependencies networkd-dispatcher || {
+      msg_error "Failed to install networkd-dispatcher"
+      return 1
+    }
+  fi
+
+  # Write update_local_ip.sh
+  cat <<'EOF' >"$SCRIPT_PATH"
+#!/bin/bash
+set -euo pipefail
+
+IP_FILE="/run/local-ip.env"
+mkdir -p "$(dirname "$IP_FILE")"
+
+get_current_ip() {
+  local ip
+
+  # Try IPv4 targets first
+  local ipv4_targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
+  for target in "${ipv4_targets[@]}"; do
+    if [[ "$target" == "default" ]]; then
+      ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+    else
+      ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+    fi
+    if [[ -n "$ip" ]]; then
+      echo "$ip"
+      return 0
+    fi
+  done
+
+  # IPv6 fallback: Try direct interface lookup for eth0
+  ip=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1)
+  if [[ -n "$ip" && "$ip" =~ : ]]; then
+    echo "$ip"
+    return 0
+  fi
+
+  # IPv6 fallback: Use routing table with IPv6 targets (Google DNS, Cloudflare DNS)
+  local ipv6_targets=("2001:4860:4860::8888" "2606:4700:4700::1111")
+  for target in "${ipv6_targets[@]}"; do
+    ip=$(ip -6 route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+    if [[ -n "$ip" && "$ip" =~ : ]]; then
+      echo "$ip"
+      return 0
+    fi
+  done
+
+  return 1
+}
+
+current_ip="$(get_current_ip)"
+
+if [[ -z "$current_ip" ]]; then
+  echo "[ERROR] Could not detect local IP" >&2
+  exit 123
+fi
+
+if [[ -f "$IP_FILE" ]]; then
+  source "$IP_FILE"
+  [[ "$LOCAL_IP" == "$current_ip" ]] && exit 0
+fi
+
+echo "LOCAL_IP=$current_ip" > "$IP_FILE"
+echo "[INFO] LOCAL_IP updated to $current_ip"
+EOF
+
+  chmod +x "$SCRIPT_PATH"
+
+  # Install dispatcher hook.
+  # FIX: this previously read `cat <"$DISPATCHER_SCRIPT"`, which *reads* the
+  # (nonexistent) hook file and then executes the following lines as shell
+  # commands ($SCRIPT_PATH would run immediately; `EOF` is command-not-found).
+  # Use an unquoted heredoc so $SCRIPT_PATH expands and the hook is written.
+  mkdir -p "$(dirname "$DISPATCHER_SCRIPT")"
+  cat <<EOF >"$DISPATCHER_SCRIPT"
+#!/bin/bash
+$SCRIPT_PATH
+EOF
+
+  chmod +x "$DISPATCHER_SCRIPT"
+  systemctl enable 
-q --now networkd-dispatcher.service || { + msg_warn "Failed to enable networkd-dispatcher service" + } + + cache_installed_version "local-ip-helper" "1.0" + msg_ok "Setup Local IP Helper" +} + +# ------------------------------------------------------------------------------ +# Installs or updates MariaDB. +# +# Description: +# - Uses Debian/Ubuntu distribution packages by default (most reliable) +# - Only uses official MariaDB repository when a specific version is requested +# - Detects current MariaDB version and replaces it if necessary +# - Preserves existing database data +# +# Variables: +# MARIADB_VERSION - MariaDB version to install (optional) +# - Not set or "latest": Uses distribution packages (recommended) +# - Specific version (e.g. "11.4", "12.2"): Uses MariaDB official repo +# ------------------------------------------------------------------------------ + +setup_mariadb() { + local MARIADB_VERSION="${MARIADB_VERSION:-latest}" + local USE_DISTRO_PACKAGES=false + + # Ensure non-interactive mode for all apt operations + export DEBIAN_FRONTEND=noninteractive + export NEEDRESTART_MODE=a + export NEEDRESTART_SUSPEND=1 + + # Determine installation method: + # - "latest" or empty: Use distribution packages (avoids mirror issues) + # - Specific version: Use MariaDB official repository + if [[ "$MARIADB_VERSION" == "latest" || -z "$MARIADB_VERSION" ]]; then + USE_DISTRO_PACKAGES=true + msg_info "Setup MariaDB (distribution packages)" + else + msg_info "Setup MariaDB $MARIADB_VERSION (official repository)" + fi + + # Get currently installed version + local CURRENT_VERSION="" + CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true + + # Pre-configure debconf to prevent any interactive prompts during install/upgrade + debconf-set-selections </dev/null | grep -E "Candidate:" | awk '{print $2}' | grep -oP '^\d+:\K\d+\.\d+\.\d+' || echo "") + + if [[ -n "$DISTRO_VERSION" ]]; then + # Compare versions - if current is higher, keep it + local CURRENT_MAJOR 
DISTRO_MAJOR + CURRENT_MAJOR=$(echo "$CURRENT_VERSION" | awk -F. '{print $1}') + DISTRO_MAJOR=$(echo "$DISTRO_VERSION" | awk -F. '{print $1}') + + if [[ "$CURRENT_MAJOR" -gt "$DISTRO_MAJOR" ]]; then + msg_warn "MariaDB $CURRENT_VERSION is already installed (higher than distro $DISTRO_VERSION)" + msg_warn "Keeping existing installation to preserve data integrity" + msg_warn "To use distribution packages, manually remove MariaDB first" + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$CURRENT_VERSION" + msg_ok "Setup MariaDB $CURRENT_VERSION (existing installation kept)" + return 0 + fi + fi + fi + + # Install or upgrade MariaDB from distribution packages + if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then + msg_error "Failed to install MariaDB packages from distribution" + return 1 + fi + + # Get installed version for caching + local INSTALLED_VERSION="" + INSTALLED_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro") + + # Configure runtime directory and finish + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$INSTALLED_VERSION" + msg_ok "Setup MariaDB $INSTALLED_VERSION (distribution packages)" + return 0 + fi + + # ============================================================================ + # OFFICIAL REPOSITORY PATH (only when specific version requested) + # ============================================================================ + + # First, check if there's an old/broken repository that needs cleanup + if [[ -f /etc/apt/sources.list.d/mariadb.sources ]] || [[ -f /etc/apt/sources.list.d/mariadb.list ]]; then + local OLD_REPO_VERSION="" + OLD_REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || + grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' 
/etc/apt/sources.list.d/mariadb.list 2>/dev/null || echo "") + + # Check if old repo points to a different version + if [[ -n "$OLD_REPO_VERSION" ]] && [[ "${OLD_REPO_VERSION%.*}" != "${MARIADB_VERSION%.*}" ]]; then + msg_info "Cleaning up old MariaDB repository (was: $OLD_REPO_VERSION, requested: $MARIADB_VERSION)" + cleanup_old_repo_files "mariadb" + $STD apt update || msg_warn "APT update had issues, continuing..." + fi + fi + + # Scenario 1: Already installed at target version - just update packages + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then + msg_info "Update MariaDB $MARIADB_VERSION" + + # Ensure APT is working + ensure_apt_working || return 1 + + # Check if repository needs to be refreshed + if [[ -f /etc/apt/sources.list.d/mariadb.sources ]]; then + local REPO_VERSION="" + REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || echo "") + if [[ -n "$REPO_VERSION" && "$REPO_VERSION" != "${MARIADB_VERSION%.*}" ]]; then + msg_warn "Repository version mismatch, updating..." 
+ manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \ + "https://mariadb.org/mariadb_release_signing_key.asc" || { + msg_error "Failed to update MariaDB repository" + return 1 + } + fi + fi + + # Perform upgrade with retry logic + ensure_apt_working || return 1 + upgrade_packages_with_retry "mariadb-server" "mariadb-client" || { + msg_error "Failed to upgrade MariaDB packages" + return 1 + } + cache_installed_version "mariadb" "$MARIADB_VERSION" + msg_ok "Update MariaDB $MARIADB_VERSION" + return 0 + fi + + # Scenario 2b: Different version installed - clean upgrade + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MARIADB_VERSION" ]]; then + msg_info "Upgrade MariaDB from $CURRENT_VERSION to $MARIADB_VERSION" + remove_old_tool_version "mariadb" + fi + + # Scenario 3: Fresh install or version change with specific version + # Prepare repository (cleanup + validation) + prepare_repository_setup "mariadb" || { + msg_error "Failed to prepare MariaDB repository" + return 1 + } + + # Install required dependencies first + local mariadb_deps=() + for dep in gawk rsync socat libdbi-perl pv; do + if apt-cache search "^${dep}$" 2>/dev/null | grep -q .; then + mariadb_deps+=("$dep") + fi + done + + if [[ ${#mariadb_deps[@]} -gt 0 ]]; then + $STD apt install -y "${mariadb_deps[@]}" 2>/dev/null || true + fi + + # Setup repository + manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \ + "https://mariadb.org/mariadb_release_signing_key.asc" || { + msg_error "Failed to setup MariaDB repository" + return 1 + } + + # Install packages with retry logic + if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then + # Fallback: try distribution packages + msg_warn "Failed to install MariaDB $MARIADB_VERSION from official repo, falling back to distribution packages..." 
+ cleanup_old_repo_files "mariadb" + $STD apt update || { + msg_warn "APT update also failed, continuing with cache" + } + if install_packages_with_retry "mariadb-server" "mariadb-client"; then + local FALLBACK_VERSION="" + FALLBACK_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro") + msg_warn "Installed MariaDB $FALLBACK_VERSION from distribution instead of requested $MARIADB_VERSION" + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$FALLBACK_VERSION" + msg_ok "Setup MariaDB $FALLBACK_VERSION (fallback to distribution packages)" + return 0 + else + msg_error "Failed to install MariaDB packages (both official repo and distribution)" + return 1 + fi + fi + + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$MARIADB_VERSION" + msg_ok "Setup MariaDB $MARIADB_VERSION" +} + +# ------------------------------------------------------------------------------ +# Helper function: Configure MariaDB runtime directory persistence +# ------------------------------------------------------------------------------ +_setup_mariadb_runtime_dir() { + # Configure tmpfiles.d to ensure /run/mysqld directory is created on boot + # This fixes the issue where MariaDB fails to start after container reboot + + # Create tmpfiles.d configuration with error handling + if ! printf '# Ensure /run/mysqld directory exists with correct permissions for MariaDB\nd /run/mysqld 0755 mysql mysql -\n' >/etc/tmpfiles.d/mariadb.conf; then + msg_warn "Failed to create /etc/tmpfiles.d/mariadb.conf - runtime directory may not persist on reboot" + fi + + # Create the directory now if it doesn't exist + # Verify mysql user exists before attempting ownership change + if [[ ! 
-d /run/mysqld ]]; then + mkdir -p /run/mysqld + # Set permissions first (works regardless of user existence) + chmod 755 /run/mysqld + # Set ownership only if mysql user exists + if getent passwd mysql >/dev/null 2>&1; then + chown mysql:mysql /run/mysqld + else + msg_warn "mysql user not found - directory created with correct permissions but ownership not set" + fi + fi +} + +# ------------------------------------------------------------------------------ +# Creates MariaDB database with user, charset and optional extra grants/modes +# +# Description: +# - Generates password if empty +# - Creates database with utf8mb4_unicode_ci +# - Creates local user with password +# - Grants full access to this DB +# - Optional: apply extra GRANT statements (comma-separated) +# - Optional: apply custom GLOBAL sql_mode +# - Saves credentials to file +# - Exports variables for use in calling script +# +# Usage: +# MARIADB_DB_NAME="myapp_db" MARIADB_DB_USER="myapp_user" setup_mariadb_db +# MARIADB_DB_NAME="domain_monitor" MARIADB_DB_USER="domainmonitor" setup_mariadb_db +# MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp" MARIADB_DB_EXTRA_GRANTS="GRANT SELECT ON \`mysql\`.\`time_zone_name\`" setup_mariadb_db +# MARIADB_DB_NAME="ghostfolio" MARIADB_DB_USER="ghostfolio" MARIADB_DB_SQL_MODE="" setup_mariadb_db +# +# Variables: +# MARIADB_DB_NAME - Database name (required) +# MARIADB_DB_USER - Database user (required) +# MARIADB_DB_PASS - User password (optional, auto-generated if empty) +# MARIADB_DB_EXTRA_GRANTS - Comma-separated GRANT statements (optional) +# Example: "GRANT SELECT ON \`mysql\`.\`time_zone_name\`" +# MARIADB_DB_SQL_MODE - Optional global sql_mode override (e.g. 
"", "STRICT_TRANS_TABLES")
+#   MARIADB_DB_CREDS_FILE - Credentials file path (optional, default: ~/${APPLICATION}.creds)
+#
+# Exports:
+#   MARIADB_DB_NAME, MARIADB_DB_USER, MARIADB_DB_PASS
+# ------------------------------------------------------------------------------
+
+function setup_mariadb_db() {
+  if [[ -z "${MARIADB_DB_NAME:-}" || -z "${MARIADB_DB_USER:-}" ]]; then
+    msg_error "MARIADB_DB_NAME and MARIADB_DB_USER must be set before calling setup_mariadb_db"
+    return 1
+  fi
+
+  # Auto-generate a 13-char alphanumeric password if the caller supplied none
+  if [[ -z "${MARIADB_DB_PASS:-}" ]]; then
+    MARIADB_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
+  fi
+
+  msg_info "Setting up MariaDB Database"
+
+  $STD mariadb -u root -e "CREATE DATABASE \`$MARIADB_DB_NAME\` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
+  $STD mariadb -u root -e "CREATE USER '$MARIADB_DB_USER'@'localhost' IDENTIFIED BY '$MARIADB_DB_PASS';"
+  $STD mariadb -u root -e "GRANT ALL ON \`$MARIADB_DB_NAME\`.* TO '$MARIADB_DB_USER'@'localhost';"
+
+  # Optional extra grants (comma-separated GRANT statement prefixes)
+  if [[ -n "${MARIADB_DB_EXTRA_GRANTS:-}" ]]; then
+    IFS=',' read -ra G_LIST <<<"${MARIADB_DB_EXTRA_GRANTS:-}"
+    for g in "${G_LIST[@]}"; do
+      g=$(echo "$g" | xargs)
+      $STD mariadb -u root -e "$g TO '$MARIADB_DB_USER'@'localhost';"
+    done
+  fi
+
+  # Optional sql_mode override.
+  # FIX: use a set-ness test (${VAR+x}) instead of -n on the value, so that
+  # an explicitly empty MARIADB_DB_SQL_MODE="" clears sql_mode as documented
+  # in the usage examples above (previously the empty string was ignored).
+  if [[ -n "${MARIADB_DB_SQL_MODE+x}" ]]; then
+    $STD mariadb -u root -e "SET GLOBAL sql_mode='${MARIADB_DB_SQL_MODE:-}';"
+  fi
+
+  $STD mariadb -u root -e "FLUSH PRIVILEGES;"
+
+  local app_name="${APPLICATION,,}"
+  local CREDS_FILE="${MARIADB_DB_CREDS_FILE:-${HOME}/${app_name}.creds}"
+  {
+    echo "MariaDB Credentials"
+    echo "Database: $MARIADB_DB_NAME"
+    echo "User: $MARIADB_DB_USER"
+    echo "Password: $MARIADB_DB_PASS"
+  } >>"$CREDS_FILE"
+  # Credentials file holds a plaintext DB password - restrict it to the owner
+  chmod 600 "$CREDS_FILE" 2>/dev/null || true
+
+  msg_ok "Set up MariaDB Database"
+
+  export MARIADB_DB_NAME
+  export MARIADB_DB_USER
+  export MARIADB_DB_PASS
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates MongoDB.
+# +# Description: +# - Preserves data across installations +# - Adds official MongoDB repo +# +# Variables: +# MONGO_VERSION - MongoDB major version to install (e.g. 7.0, 8.0) +# ------------------------------------------------------------------------------ + +function setup_mongodb() { + local MONGO_VERSION="${MONGO_VERSION:-8.0}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(get_os_info id) + DISTRO_CODENAME=$(get_os_info codename) + + # Ensure non-interactive mode for all apt operations + export DEBIAN_FRONTEND=noninteractive + export NEEDRESTART_MODE=a + export NEEDRESTART_SUSPEND=1 + + # Check AVX support + if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then + local major="${MONGO_VERSION%%.*}" + if ((major > 5)); then + msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system." + return 1 + fi + fi + + case "$DISTRO_ID" in + ubuntu) + MONGO_BASE_URL="https://repo.mongodb.org/apt/ubuntu" + ;; + debian) + MONGO_BASE_URL="https://repo.mongodb.org/apt/debian" + ;; + *) + msg_error "Unsupported distribution: $DISTRO_ID" + return 1 + ;; + esac + + # Get currently installed version + local INSTALLED_VERSION="" + INSTALLED_VERSION=$(is_tool_installed "mongodb" 2>/dev/null) || true + + # Scenario 1: Already at target version - just update packages + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then + msg_info "Update MongoDB $MONGO_VERSION" + + ensure_apt_working || return 1 + + # Perform upgrade with retry logic + upgrade_packages_with_retry "mongodb-org" || { + msg_error "Failed to upgrade MongoDB" + return 1 + } + cache_installed_version "mongodb" "$MONGO_VERSION" + msg_ok "Update MongoDB $MONGO_VERSION" + return 0 + fi + + # Scenario 2: Different version installed - clean upgrade + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$MONGO_VERSION" ]]; then + msg_info "Upgrade MongoDB from $INSTALLED_VERSION to $MONGO_VERSION" + remove_old_tool_version "mongodb" + else + msg_info "Setup 
MongoDB $MONGO_VERSION" + fi + + cleanup_orphaned_sources + + # Prepare repository (cleanup + validation) + prepare_repository_setup "mongodb" || { + msg_error "Failed to prepare MongoDB repository" + return 1 + } + + # Setup repository + manage_tool_repository "mongodb" "$MONGO_VERSION" "$MONGO_BASE_URL" \ + "https://www.mongodb.org/static/pgp/server-${MONGO_VERSION}.asc" || { + msg_error "Failed to setup MongoDB repository" + return 1 + } + + # Wait for repo to settle + $STD apt update || { + msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?" + return 1 + } + + # Install MongoDB with retry logic + install_packages_with_retry "mongodb-org" || { + msg_error "Failed to install MongoDB packages" + return 1 + } + + if ! command -v mongod >/dev/null 2>&1; then + msg_error "MongoDB binary not found after installation" + return 1 + fi + + mkdir -p /var/lib/mongodb + chown -R mongodb:mongodb /var/lib/mongodb + + $STD systemctl enable mongod || { + msg_warn "Failed to enable mongod service" + } + safe_service_restart mongod + + # Verify MongoDB version + local INSTALLED_VERSION + INSTALLED_VERSION=$(mongod --version 2>/dev/null | grep -oP 'db version v\K[0-9]+\.[0-9]+' | head -n1 || echo "0.0") + verify_tool_version "MongoDB" "$MONGO_VERSION" "$INSTALLED_VERSION" || true + + cache_installed_version "mongodb" "$MONGO_VERSION" + msg_ok "Setup MongoDB $MONGO_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs or upgrades MySQL. 
+# +# Description: +# - By default uses distro repository (Debian/Ubuntu apt) for stability +# - Optionally uses official MySQL repository for specific versions +# - Detects existing MySQL installation +# - Purges conflicting packages before installation +# - Supports clean upgrade +# - Handles Debian Trixie libaio1t64 transition +# +# Variables: +# USE_MYSQL_REPO - Use official MySQL repository (default: true) +# Set to "false" to use distro packages instead +# MYSQL_VERSION - MySQL version to install when using official repo +# (e.g. 8.0, 8.4) (default: 8.0) +# +# Examples: +# setup_mysql # Uses official MySQL repo, 8.0 +# MYSQL_VERSION="8.4" setup_mysql # Specific version from MySQL repo +# USE_MYSQL_REPO=false setup_mysql # Uses distro package instead +# ------------------------------------------------------------------------------ + +function setup_mysql() { + local MYSQL_VERSION="${MYSQL_VERSION:-8.0}" + local USE_MYSQL_REPO="${USE_MYSQL_REPO:-true}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release) + + # Ensure non-interactive mode for all apt operations + export DEBIAN_FRONTEND=noninteractive + export NEEDRESTART_MODE=a + export NEEDRESTART_SUSPEND=1 + + # Get currently installed version + local CURRENT_VERSION="" + CURRENT_VERSION=$(is_tool_installed "mysql" 2>/dev/null) || true + + # Scenario 1: Use distro repository (default, most stable) + if [[ "$USE_MYSQL_REPO" != "true" && "$USE_MYSQL_REPO" != "TRUE" && "$USE_MYSQL_REPO" != "1" ]]; then + msg_info "Setup MySQL (distro package)" + + # If already installed, just update + if [[ -n "$CURRENT_VERSION" ]]; then + msg_info "Update MySQL $CURRENT_VERSION" + ensure_apt_working || return 1 + upgrade_packages_with_retry "default-mysql-server" "default-mysql-client" || + upgrade_packages_with_retry "mysql-server" "mysql-client" || + upgrade_packages_with_retry "mariadb-server" 
"mariadb-client" || { + msg_error "Failed to upgrade MySQL/MariaDB packages" + return 1 + } + cache_installed_version "mysql" "$CURRENT_VERSION" + msg_ok "Update MySQL $CURRENT_VERSION" + return 0 + fi + + # Fresh install from distro repo + ensure_apt_working || return 1 + + export DEBIAN_FRONTEND=noninteractive + # Try default-mysql-server first, fallback to mysql-server, then mariadb + if apt-cache search "^default-mysql-server$" 2>/dev/null | grep -q .; then + install_packages_with_retry "default-mysql-server" "default-mysql-client" || { + msg_warn "default-mysql-server failed, trying mysql-server" + install_packages_with_retry "mysql-server" "mysql-client" || { + msg_warn "mysql-server failed, trying mariadb as fallback" + install_packages_with_retry "mariadb-server" "mariadb-client" || { + msg_error "Failed to install any MySQL/MariaDB from distro repository" + return 1 + } + } + } + elif apt-cache search "^mysql-server$" 2>/dev/null | grep -q .; then + install_packages_with_retry "mysql-server" "mysql-client" || { + msg_warn "mysql-server failed, trying mariadb as fallback" + install_packages_with_retry "mariadb-server" "mariadb-client" || { + msg_error "Failed to install any MySQL/MariaDB from distro repository" + return 1 + } + } + else + # Distro doesn't have MySQL, use MariaDB + install_packages_with_retry "mariadb-server" "mariadb-client" || { + msg_error "Failed to install MariaDB from distro repository" + return 1 + } + fi + + # Get installed version + local INSTALLED_VERSION="" + INSTALLED_VERSION=$(is_tool_installed "mysql" 2>/dev/null) || true + if [[ -z "$INSTALLED_VERSION" ]]; then + INSTALLED_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true + fi + cache_installed_version "mysql" "${INSTALLED_VERSION:-distro}" + msg_ok "Setup MySQL/MariaDB ${INSTALLED_VERSION:-from distro}" + return 0 + fi + + # Scenario 2: Use official MySQL repository (USE_MYSQL_REPO=true) + # Scenario 2a: Already at target version - just update packages + if [[ -n 
"$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MYSQL_VERSION" ]]; then + msg_info "Update MySQL $MYSQL_VERSION" + + ensure_apt_working || return 1 + + # Perform upgrade with retry logic (non-fatal if fails) + upgrade_packages_with_retry "mysql-server" "mysql-client" || { + msg_warn "MySQL package upgrade had issues, continuing with current version" + } + + cache_installed_version "mysql" "$MYSQL_VERSION" + msg_ok "Update MySQL $MYSQL_VERSION" + return 0 + fi + + # Scenario 2: Different version installed - clean upgrade + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then + msg_info "Upgrade MySQL from $CURRENT_VERSION to $MYSQL_VERSION" + remove_old_tool_version "mysql" + else + msg_info "Setup MySQL $MYSQL_VERSION" + fi + + # Prepare repository (cleanup + validation) + prepare_repository_setup "mysql" || { + msg_error "Failed to prepare MySQL repository" + return 1 + } + + # Debian 13+ Fix: MySQL 8.0 incompatible with libaio1t64, use 8.4 LTS + if [[ "$DISTRO_ID" == "debian" && "$DISTRO_CODENAME" =~ ^(trixie|forky|sid)$ ]]; then + msg_info "Debian ${DISTRO_CODENAME} detected → using MySQL 8.4 LTS (libaio1t64 compatible)" + + if ! download_gpg_key "https://repo.mysql.com/RPM-GPG-KEY-mysql-2023" "/etc/apt/keyrings/mysql.gpg" "dearmor"; then + msg_error "Failed to import MySQL GPG key" + return 1 + fi + + cat >/etc/apt/sources.list.d/mysql.sources </dev/null | grep -q . && + install_packages_with_retry "mysql-server" "mysql-client"; then + mysql_install_success=true + elif apt-cache search "^mysql-community-server$" 2>/dev/null | grep -q . && + install_packages_with_retry "mysql-community-server" "mysql-community-client"; then + mysql_install_success=true + elif apt-cache search "^mysql$" 2>/dev/null | grep -q . 
&&
    install_packages_with_retry "mysql"; then
    mysql_install_success=true
  fi

  if [[ "$mysql_install_success" == false ]]; then
    msg_error "MySQL ${MYSQL_VERSION} package not available for suite ${SUITE}"
    return 1
  fi

  # Verify mysql command is accessible
  if ! command -v mysql >/dev/null 2>&1; then
    hash -r
    if ! command -v mysql >/dev/null 2>&1; then
      msg_error "MySQL installed but mysql command still not found"
      return 1
    fi
  fi

  cache_installed_version "mysql" "$MYSQL_VERSION"
  msg_ok "Setup MySQL $MYSQL_VERSION"
}

# ------------------------------------------------------------------------------
# Installs Node.js and optional global modules.
#
# Description:
#   - Installs specified Node.js version using NodeSource APT repo
#   - Optionally installs or updates global npm modules
#
# Variables:
#   NODE_VERSION - Node.js version to install (default: 24 LTS)
#   NODE_MODULE  - Comma-separated list of global modules
#                  (e.g. "yarn,@vue/cli@5.0.0")
# ------------------------------------------------------------------------------

function setup_nodejs() {
  local NODE_VERSION="${NODE_VERSION:-24}"
  local NODE_MODULE="${NODE_MODULE:-}"

  # ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts
  cleanup_legacy_install "nodejs"

  # Get currently installed version (empty string if not installed)
  local CURRENT_NODE_VERSION=""
  CURRENT_NODE_VERSION=$(is_tool_installed "nodejs" 2>/dev/null) || true

  # Ensure jq is available for JSON parsing
  if ! command -v jq &>/dev/null; then
    $STD apt update
    $STD apt install -y jq || {
      msg_error "Failed to install jq"
      return 1
    }
  fi

  # Scenario 1: Already installed at target version - just update packages/modules
  if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" == "$NODE_VERSION" ]]; then
    msg_info "Update Node.js $NODE_VERSION"

    ensure_apt_working || return 1

    # Just update npm to latest
    $STD npm install -g npm@latest 2>/dev/null || true

    cache_installed_version "nodejs" "$NODE_VERSION"
    msg_ok "Update Node.js $NODE_VERSION"
  else
    # Scenario 2: Different version installed - clean upgrade
    if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
      msg_info "Upgrade Node.js from $CURRENT_NODE_VERSION to $NODE_VERSION"
      remove_old_tool_version "nodejs"
    else
      msg_info "Setup Node.js $NODE_VERSION"
    fi

    # Remove ALL Debian nodejs packages BEFORE adding NodeSource repo,
    # otherwise apt may hold back the NodeSource build
    if dpkg -l 2>/dev/null | grep -qE "^ii.*(nodejs|libnode|node-cjs|node-acorn|node-balanced|node-brace|node-minimatch|node-undici|node-xtend|node-corepack)"; then
      msg_info "Removing Debian-packaged Node.js and dependencies"
      $STD apt purge -y nodejs nodejs-doc libnode* node-* 2>/dev/null || true
      $STD apt autoremove -y 2>/dev/null || true
      $STD apt clean 2>/dev/null || true
    fi

    # Remove any APT pinning (not needed)
    rm -f /etc/apt/preferences.d/nodesource 2>/dev/null || true

    # Prepare repository (cleanup + validation)
    prepare_repository_setup "nodesource" || {
      msg_error "Failed to prepare Node.js repository"
      return 1
    }

    # Setup NodeSource repository
    manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || {
      msg_error "Failed to setup Node.js repository"
      return 1
    }

    # Force APT cache refresh after repository setup
    $STD apt update || {
      msg_warn "apt update failed after Node.js repository setup"
    }

    ensure_dependencies curl ca-certificates gnupg

    install_packages_with_retry "nodejs" || {
      msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
      return 1
    }

    # Verify Node.js was installed correctly
    if ! command -v node >/dev/null 2>&1; then
      msg_error "Node.js binary not found after installation"
      return 1
    fi

    local INSTALLED_NODE_VERSION
    INSTALLED_NODE_VERSION=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+' || echo "0")
    verify_tool_version "Node.js" "$NODE_VERSION" "$INSTALLED_NODE_VERSION" || true

    # Verify npm is available (should come with NodeSource nodejs)
    if ! command -v npm >/dev/null 2>&1; then
      msg_error "npm not found after Node.js installation - repository issue?"
      return 1
    fi

    # Update to latest npm (with version check to avoid incompatibility)
    local NPM_VERSION
    NPM_VERSION=$(npm -v 2>/dev/null || echo "0")
    if [[ "$NPM_VERSION" != "0" ]]; then
      $STD npm install -g npm@latest 2>/dev/null || {
        msg_warn "Failed to update npm to latest version (continuing with bundled npm $NPM_VERSION)"
      }
    fi

    cache_installed_version "nodejs" "$NODE_VERSION"
    msg_ok "Setup Node.js $NODE_VERSION"
  fi

  export NODE_OPTIONS="--max-old-space-size=4096"

  # Ensure valid working directory for npm (avoids uv_cwd error)
  if [[ ! -d /opt ]]; then
    mkdir -p /opt
  fi
  cd /opt || {
    msg_error "Failed to set safe working directory before npm install"
    return 1
  }

  # Install global Node modules
  if [[ -n "$NODE_MODULE" ]]; then
    IFS=',' read -ra MODULES <<<"$NODE_MODULE"
    local failed_modules=0
    for mod in "${MODULES[@]}"; do
      local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
      if [[ "$mod" == @*/*@* ]]; then
        # Scoped package with version, e.g. @vue/cli-service@latest
        MODULE_NAME="${mod%@*}"
        MODULE_REQ_VERSION="${mod##*@}"
      elif [[ "$mod" == @* ]]; then
        # FIX: scoped package WITHOUT version, e.g. @vue/cli.
        # Previously this fell through to the *"@"* branch, stripping the
        # scope marker and producing an empty module name.
        MODULE_NAME="$mod"
        MODULE_REQ_VERSION="latest"
      elif [[ "$mod" == *"@"* ]]; then
        # Unscoped package with version, e.g. yarn@latest
        MODULE_NAME="${mod%@*}"
        MODULE_REQ_VERSION="${mod##*@}"
      else
        # No version specified
        MODULE_NAME="$mod"
        MODULE_REQ_VERSION="latest"
      fi

      # Check if the module is already installed
      if $STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep -q "$MODULE_NAME@"; then
        MODULE_INSTALLED_VERSION="$(npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep "$MODULE_NAME@" | awk -F@ '{print $2}' 2>/dev/null | tr -d '[:space:]' || echo '')"
        if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
          msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
          if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then
            msg_warn "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
            ((failed_modules++)) || true
            continue
          fi
        elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
          msg_info "Updating $MODULE_NAME to latest version"
          if ! $STD npm install -g "${MODULE_NAME}@latest" 2>/dev/null; then
            msg_warn "Failed to update $MODULE_NAME to latest version"
            ((failed_modules++)) || true
            continue
          fi
        fi
      else
        msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
        if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then
          msg_warn "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
          ((failed_modules++)) || true
          continue
        fi
      fi
    done
    if [[ $failed_modules -eq 0 ]]; then
      msg_ok "Installed Node.js modules: $NODE_MODULE"
    else
      msg_warn "Installed Node.js modules with $failed_modules failure(s): $NODE_MODULE"
    fi
  fi
}

# ------------------------------------------------------------------------------
# Installs PHP with selected modules and configures Apache/FPM support.
#
# Description:
#   - Adds Sury PHP repo (Debian) or ondrej/php PPA (Ubuntu) if needed
#   - Installs default and user-defined modules
#   - Patches php.ini for CLI, Apache, and FPM as needed
#   - Handles built-in modules gracefully (e.g., opcache in PHP 8.5+)
#   - Skips unavailable packages without failing
#
# Variables:
#   PHP_VERSION - PHP version to install (default: 8.4)
#   PHP_MODULE - Additional comma-separated modules
#   PHP_APACHE - Set YES to enable PHP with Apache
#   PHP_FPM - Set YES to enable PHP-FPM
#   PHP_MEMORY_LIMIT - (default: 512M)
#   PHP_UPLOAD_MAX_FILESIZE - (default: 128M)
#   PHP_POST_MAX_SIZE - (default: 128M)
#   PHP_MAX_EXECUTION_TIME - (default: 300)
#
# Notes on modules:
#   - Base modules (always installed): bcmath, cli, curl, gd, intl, mbstring,
#     readline, xml, zip, common
#   - Extended modules (commonly needed): mysql, sqlite3, pgsql, redis,
#     imagick, bz2, apcu
#   - Some modules are built-in depending on PHP version (PHP 8.5+: opcache;
#     all versions: ctype, fileinfo, iconv, tokenizer, phar, posix, ...)
#   - Unavailable modules are skipped with a warning, not an error
# ------------------------------------------------------------------------------

function setup_php() {
  local PHP_VERSION="${PHP_VERSION:-8.4}"
  local PHP_MODULE="${PHP_MODULE:-}"
  local PHP_APACHE="${PHP_APACHE:-NO}"
  local PHP_FPM="${PHP_FPM:-NO}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Parse major/minor for version-dependent module handling
  local PHP_MAJOR="${PHP_VERSION%%.*}"
  local PHP_MINOR="${PHP_VERSION#*.}"
  PHP_MINOR="${PHP_MINOR%%.*}"

  # Modules that are ALWAYS part of php-common (no separate package needed)
  local BUILTIN_MODULES="calendar,ctype,exif,ffi,fileinfo,ftp,gettext,iconv,pdo,phar,posix,shmop,sockets,sysvmsg,sysvsem,sysvshm,tokenizer"

  # Modules that became built-in in specific PHP versions (8.5+: opcache)
  local BUILTIN_85=""
  if [[ "$PHP_MAJOR" -gt 8 ]] || [[ "$PHP_MAJOR" -eq 8 && "$PHP_MINOR" -ge 5 ]]; then
    BUILTIN_85="opcache"
  fi

  # Base modules - essential for most PHP applications
  local BASE_MODULES="cli,common,bcmath,curl,dom,gd,gmp,intl,mbstring,readline,xml,zip"

  # Add opcache only for PHP < 8.5 (it's built-in starting from 8.5)
  if [[ "$PHP_MAJOR" -lt 8 ]] || [[ "$PHP_MAJOR" -eq 8 && "$PHP_MINOR" -lt 5 ]]; then
    BASE_MODULES="${BASE_MODULES},opcache"
  fi

  # Extended default modules - commonly needed by web applications
  local EXTENDED_MODULES="mysql,sqlite3,pgsql,redis,imagick,bz2,apcu"

  local COMBINED_MODULES="${BASE_MODULES},${EXTENDED_MODULES}"

  local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}"
  local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}"
  local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}"
  local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}"

  # Merge with user-defined modules
  if [[ -n "$PHP_MODULE" ]]; then
    COMBINED_MODULES="${COMBINED_MODULES},${PHP_MODULE}"
  fi

  # Filter out built-in modules (they don't have separate packages)
  local FILTERED_MODULES=""
  IFS=',' read -ra ALL_MODULES <<<"$COMBINED_MODULES"
  for mod in "${ALL_MODULES[@]}"; do
    mod=$(echo "$mod" | tr -d '[:space:]')
    [[ -z "$mod" ]] && continue

    # Skip if it's a known built-in module
    if echo ",$BUILTIN_MODULES,$BUILTIN_85," | grep -qi ",$mod,"; then
      continue
    fi

    if [[ -z "$FILTERED_MODULES" ]]; then
      FILTERED_MODULES="$mod"
    else
      FILTERED_MODULES="${FILTERED_MODULES},$mod"
    fi
  done

  # Deduplicate while preserving order
  COMBINED_MODULES=$(echo "$FILTERED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -)

  # Get current PHP-CLI version
  local CURRENT_PHP=""
  CURRENT_PHP=$(is_tool_installed "php" 2>/dev/null) || true

  # Remove conflicting PHP version before pinning
  if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
    msg_info "Removing conflicting PHP ${CURRENT_PHP} (need ${PHP_VERSION})"
    stop_all_services "php.*-fpm"
    $STD apt purge -y "php*" 2>/dev/null || true
    $STD apt autoremove -y 2>/dev/null || true
  fi

  # NOW create pinning for the desired version.
  # FIX: restore heredoc syntax lost in the snapshot ("cat <FILE" -> "cat <<EOF >FILE");
  # the unquoted EOF is intentional so ${PHP_VERSION} expands into the pin file.
  mkdir -p /etc/apt/preferences.d
  cat <<EOF >/etc/apt/preferences.d/php-pin
Package: php${PHP_VERSION}*
Pin: version ${PHP_VERSION}.*
Pin-Priority: 1001

Package: php[0-9].*
Pin: release o=packages.sury.org-php
Pin-Priority: -1
EOF

  # Setup repository (cleanup + validation)
  prepare_repository_setup "php" "deb.sury.org-php" || {
    msg_error "Failed to prepare PHP repository"
    return 1
  }

  # Use different repository based on OS
  if [[ "$DISTRO_ID" == "ubuntu" ]]; then
    # Ubuntu: Use ondrej/php PPA
    msg_info "Adding ondrej/php PPA for Ubuntu"
    $STD apt install -y software-properties-common || {
      msg_error "Failed to install software-properties-common"
      return 1
    }
    # Don't use $STD for add-apt-repository as it uses background processes
    add-apt-repository -y ppa:ondrej/php >>"$(get_active_logfile)" 2>&1
  else
    # Debian: Use Sury repository
    manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
      msg_error "Failed to setup PHP repository"
      return 1
    }
  fi
  ensure_apt_working || return 1
  $STD apt update || {
    msg_warn "apt update failed after PHP repository setup"
  }

  # Get available PHP version from repository
  local AVAILABLE_PHP_VERSION=""
  AVAILABLE_PHP_VERSION=$(apt-cache show "php${PHP_VERSION}" 2>/dev/null | grep -m1 "^Version:" | awk '{print $2}' 2>/dev/null | cut -d- -f1 || true)

  if [[ -z "$AVAILABLE_PHP_VERSION" ]]; then
    msg_error "PHP ${PHP_VERSION} not found in configured repositories"
    return 1
  fi

  # Build module list - verify each package exists before adding
  local MODULE_LIST="php${PHP_VERSION}"
  local SKIPPED_MODULES=""

  IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
  for mod in "${MODULES[@]}"; do
    mod=$(echo "$mod" | tr -d '[:space:]')
    [[ -z "$mod" ]] && continue

    local pkg_name="php${PHP_VERSION}-${mod}"

    # Check if package exists in repository
    if apt-cache show "$pkg_name" &>/dev/null; then
      MODULE_LIST+=" $pkg_name"
    else
      # Package doesn't exist - could be built-in or renamed
      if [[ -z "$SKIPPED_MODULES" ]]; then
        SKIPPED_MODULES="$mod"
      else
        SKIPPED_MODULES="${SKIPPED_MODULES}, $mod"
      fi
    fi
  done

  # Log skipped modules (informational, not an error)
  if [[ -n "$SKIPPED_MODULES" ]]; then
    msg_info "Skipping unavailable/built-in modules: $SKIPPED_MODULES"
  fi

  if [[ "$PHP_FPM" == "YES" ]]; then
    if apt-cache show "php${PHP_VERSION}-fpm" &>/dev/null; then
      MODULE_LIST+=" php${PHP_VERSION}-fpm"
    else
      msg_warn "php${PHP_VERSION}-fpm not available"
    fi
    # Create systemd override for PHP-FPM to fix runtime directory issues
    # in LXC containers. FIX: heredoc syntax restored here as well.
    mkdir -p /etc/systemd/system/php${PHP_VERSION}-fpm.service.d/
    cat <<EOF >/etc/systemd/system/php${PHP_VERSION}-fpm.service.d/override.conf
[Service]
RuntimeDirectory=php
RuntimeDirectoryMode=0755
EOF
    $STD systemctl daemon-reload
  fi

  # install apache2 with PHP support if requested
  if [[ "$PHP_APACHE" == "YES" ]]; then
    if ! dpkg -l 2>/dev/null | grep -q "libapache2-mod-php${PHP_VERSION}"; then
      msg_info "Installing Apache with PHP ${PHP_VERSION} module"
      install_packages_with_retry "apache2" || {
        msg_error "Failed to install Apache"
        return 1
      }
      install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || {
        msg_warn "Failed to install libapache2-mod-php${PHP_VERSION}, continuing without Apache module"
      }
    fi
  fi

  # Install PHP packages (pinning via preferences.d ensures correct version)
  msg_info "Installing PHP ${PHP_VERSION} packages"

  # First attempt: install all verified packages at once
  # ($MODULE_LIST is intentionally unquoted: it is a space-separated list)
  if ! $STD apt install -y $MODULE_LIST 2>/dev/null; then
    msg_warn "Bulk installation failed, attempting individual installation"

    # Install main package first (critical)
    if ! $STD apt install -y "php${PHP_VERSION}" 2>/dev/null; then
      msg_error "Failed to install php${PHP_VERSION}"
      return 1
    fi

    # Try to install Apache module individually if requested
    if [[ "$PHP_APACHE" == "YES" ]]; then
      $STD apt install -y "libapache2-mod-php${PHP_VERSION}" 2>/dev/null || {
        msg_warn "Could not install libapache2-mod-php${PHP_VERSION}"
      }
    fi

    # Try to install each remaining package individually
    for pkg in $MODULE_LIST; do
      [[ "$pkg" == "php${PHP_VERSION}" ]] && continue # Already installed
      $STD apt install -y "$pkg" 2>/dev/null || {
        msg_warn "Could not install $pkg - continuing without it"
      }
    done
  fi
  cache_installed_version "php" "$PHP_VERSION"

  # Patch all relevant php.ini files
  local PHP_INI_PATHS=("/etc/php/${PHP_VERSION}/cli/php.ini")
  [[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini")
  [[ "$PHP_APACHE" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini")
  for ini in "${PHP_INI_PATHS[@]}"; do
    if [[ -f "$ini" ]]; then
      $STD sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini"
      $STD sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini"
      $STD sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini"
      $STD sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini"
    fi
  done

  # Patch Apache configuration if needed.
  # FIX: iterate the glob directly instead of parsing `ls` output.
  if [[ "$PHP_APACHE" == "YES" ]]; then
    local mod_conf mod
    for mod_conf in /etc/apache2/mods-enabled/php[0-9].[0-9].conf; do
      [[ -e "$mod_conf" ]] || continue
      mod="${mod_conf##*/}"
      mod="${mod%.conf}"
      if [[ "$mod" != "php${PHP_VERSION}" ]]; then
        $STD a2dismod "$mod" || true
      fi
    done
    $STD a2enmod mpm_prefork
    $STD a2enmod "php${PHP_VERSION}"
    safe_service_restart apache2 || true
  fi

  # Enable and restart PHP-FPM if requested
  if [[ "$PHP_FPM" == "YES" ]]; then
    if systemctl list-unit-files | grep -q "php${PHP_VERSION}-fpm.service"; then
      $STD systemctl enable "php${PHP_VERSION}-fpm"
      safe_service_restart "php${PHP_VERSION}-fpm"
    fi
  fi

  # Verify PHP installation - critical check
  if ! command -v php >/dev/null 2>&1; then
    msg_error "PHP installation verification failed - php command not found"
    return 1
  fi

  # FIX: split declaration and assignment so the command's exit status
  # is not masked by `local`
  local INSTALLED_VERSION
  INSTALLED_VERSION=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)

  if [[ "$INSTALLED_VERSION" != "$PHP_VERSION" ]]; then
    msg_error "PHP version mismatch: requested ${PHP_VERSION} but got ${INSTALLED_VERSION}"
    msg_error "This indicates a critical package installation issue"
    # Don't cache wrong version
    return 1
  fi

  cache_installed_version "php" "$INSTALLED_VERSION"
  msg_ok "Setup PHP ${INSTALLED_VERSION}"
}

# ------------------------------------------------------------------------------
# Installs or upgrades PostgreSQL and optional extensions/modules.
#
# Description:
#   - By default uses distro repository (Debian/Ubuntu apt) for stability
#   - Optionally uses official PGDG repository for specific versions
#   - Detects existing PostgreSQL version
#   - Dumps all databases before upgrade
#   - Installs optional PG_MODULES (e.g. postgis, contrib)
#   - Restores dumped data post-upgrade
#
# Variables:
#   USE_PGDG_REPO - Use official PGDG repository (default: true)
#                   Set to "false" to use distro packages instead
#   PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16)
#   PG_MODULES - Comma-separated list of modules (e.g. "postgis,contrib")
#   e.g. "postgis,contrib"
#
# Examples:
#   setup_postgresql                     # Uses PGDG repo, PG 16
#   PG_VERSION="17" setup_postgresql     # Specific version from PGDG
#   USE_PGDG_REPO=false setup_postgresql # Uses distro package instead
# ------------------------------------------------------------------------------

function setup_postgresql() {
  local PG_VERSION="${PG_VERSION:-16}"
  local PG_MODULES="${PG_MODULES:-}"
  local USE_PGDG_REPO="${USE_PGDG_REPO:-true}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Keep every apt invocation non-interactive
  export DEBIAN_FRONTEND=noninteractive
  export NEEDRESTART_MODE=a
  export NEEDRESTART_SUSPEND=1

  # Major version of any PostgreSQL already on the system ("" if none)
  local CURRENT_PG_VERSION=""
  if command -v psql >/dev/null; then
    CURRENT_PG_VERSION="$(psql -V 2>/dev/null | awk '{print $3}' | cut -d. -f1)"
  fi

  # Decide repository source: anything other than true/TRUE/1 means distro packages
  local use_pgdg=false
  case "$USE_PGDG_REPO" in
  true | TRUE | 1) use_pgdg=true ;;
  esac

  # --- Path A: distro repository --------------------------------------------
  if [[ "$use_pgdg" == false ]]; then
    msg_info "Setup PostgreSQL (distro package)"

    # Existing install: upgrade in place, then add requested modules
    if [[ -n "$CURRENT_PG_VERSION" ]]; then
      msg_info "Update PostgreSQL $CURRENT_PG_VERSION"
      ensure_apt_working || return 1
      upgrade_packages_with_retry "postgresql" "postgresql-client" || true
      cache_installed_version "postgresql" "$CURRENT_PG_VERSION"
      msg_ok "Update PostgreSQL $CURRENT_PG_VERSION"

      if [[ -n "$PG_MODULES" ]]; then
        local -a mod_list
        IFS=',' read -ra mod_list <<<"$PG_MODULES"
        local pg_mod
        for pg_mod in "${mod_list[@]}"; do
          $STD apt install -y "postgresql-${CURRENT_PG_VERSION}-${pg_mod}" 2>/dev/null || true
        done
      fi
      return 0
    fi

    # Fresh install from distro repo
    ensure_apt_working || return 1
    export DEBIAN_FRONTEND=noninteractive
    install_packages_with_retry "postgresql" "postgresql-client" || {
      msg_error "Failed to install PostgreSQL from distro repository"
      return 1
    }

    # Re-detect whatever major version the distro shipped
    local DETECTED_VERSION=""
    if command -v psql >/dev/null; then
      DETECTED_VERSION="$(psql -V 2>/dev/null | awk '{print $3}' | cut -d. -f1)"
    fi

    $STD systemctl enable --now postgresql 2>/dev/null || true

    # Expose the versioned bin dir via /etc/environment.
    # NOTE(review): this overwrites the whole file rather than appending —
    # matches upstream behavior; confirm no other entries are expected there.
    if [[ -n "$DETECTED_VERSION" ]] && ! grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then
      echo 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/'"${DETECTED_VERSION}"'/bin"' >/etc/environment
    fi

    cache_installed_version "postgresql" "${DETECTED_VERSION:-distro}"
    msg_ok "Setup PostgreSQL ${DETECTED_VERSION:-from distro}"

    if [[ -n "$PG_MODULES" && -n "$DETECTED_VERSION" ]]; then
      local -a mod_list
      IFS=',' read -ra mod_list <<<"$PG_MODULES"
      local pg_mod
      for pg_mod in "${mod_list[@]}"; do
        $STD apt install -y "postgresql-${DETECTED_VERSION}-${pg_mod}" 2>/dev/null || true
      done
    fi
    return 0
  fi

  # --- Path B: official PGDG repository -------------------------------------

  # B1: requested version already present - update only
  if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then
    msg_info "Update PostgreSQL $PG_VERSION"
    ensure_apt_working || return 1
    upgrade_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null || true
    cache_installed_version "postgresql" "$PG_VERSION"
    msg_ok "Update PostgreSQL $PG_VERSION"

    if [[ -n "$PG_MODULES" ]]; then
      local -a mod_list
      IFS=',' read -ra mod_list <<<"$PG_MODULES"
      local pg_mod
      for pg_mod in "${mod_list[@]}"; do
        $STD apt install -y "postgresql-${PG_VERSION}-${pg_mod}" 2>/dev/null || true
      done
    fi
    return 0
  fi

  # B2: different version present - dump everything, then remove the old one
  if [[ -n "$CURRENT_PG_VERSION" ]]; then
    msg_info "Upgrade PostgreSQL from $CURRENT_PG_VERSION to $PG_VERSION"
    msg_info "Creating backup of PostgreSQL $CURRENT_PG_VERSION databases..."
    local PG_BACKUP_FILE="/var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
    $STD runuser -u postgres -- pg_dumpall >"$PG_BACKUP_FILE" || {
      msg_error "Failed to backup PostgreSQL databases"
      return 1
    }
    $STD systemctl stop postgresql || true
    $STD apt purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" 2>/dev/null || true
  else
    msg_info "Setup PostgreSQL $PG_VERSION"
  fi

  # B3: configure the PGDG repository and install the requested version
  prepare_repository_setup "pgdg" "postgresql" || {
    msg_error "Failed to prepare PostgreSQL repository"
    return 1
  }

  local SUITE
  case "$DISTRO_CODENAME" in
  trixie | forky | sid)
    if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then
      SUITE="trixie-pgdg"
    else
      # No PGDG suite for this codename: re-enter via the distro path
      msg_warn "PGDG repo not available for ${DISTRO_CODENAME}, falling back to distro packages"
      USE_PGDG_REPO=false setup_postgresql
      return $?
    fi
    ;;
  *)
    SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt")
    SUITE="${SUITE}-pgdg"
    ;;
  esac

  setup_deb822_repo \
    "pgdg" \
    "https://www.postgresql.org/media/keys/ACCC4CF8.asc" \
    "https://apt.postgresql.org/pub/repos/apt" \
    "$SUITE" \
    "main"

  if ! $STD apt update; then
    msg_error "APT update failed for PostgreSQL repository"
    return 1
  fi

  # ssl-cert is a soft dependency; install when the archive carries it
  if apt-cache search "^ssl-cert$" 2>/dev/null | grep -q .; then
    $STD apt install -y ssl-cert 2>/dev/null || true
  fi

  # Attempt the known package name patterns in order of preference
  local installed_ok=false

  if apt-cache search "^postgresql-${PG_VERSION}$" 2>/dev/null | grep -q . &&
    install_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"; then
    installed_ok=true
  fi

  if [[ "$installed_ok" == false ]] &&
    apt-cache search "^postgresql-server-${PG_VERSION}$" 2>/dev/null | grep -q . &&
    $STD apt install -y "postgresql-server-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null; then
    installed_ok=true
  fi

  if [[ "$installed_ok" == false ]] &&
    apt-cache search "^postgresql$" 2>/dev/null | grep -q . &&
    $STD apt install -y postgresql postgresql-client 2>/dev/null; then
    installed_ok=true
  fi

  if [[ "$installed_ok" == false ]]; then
    msg_error "PostgreSQL package not available for suite ${SUITE}"
    return 1
  fi

  if ! command -v psql >/dev/null 2>&1; then
    msg_error "PostgreSQL installed but psql command not found"
    return 1
  fi

  # Replay the pre-upgrade dump into the new server (best effort)
  if [[ -n "$CURRENT_PG_VERSION" && -n "${PG_BACKUP_FILE:-}" && -f "${PG_BACKUP_FILE}" ]]; then
    msg_info "Restoring PostgreSQL databases from backup..."
    $STD runuser -u postgres -- psql <"$PG_BACKUP_FILE" 2>/dev/null || {
      msg_warn "Failed to restore database backup - this may be expected for major version upgrades"
    }
  fi

  $STD systemctl enable --now postgresql 2>/dev/null || {
    msg_warn "Failed to enable/start PostgreSQL service"
  }

  # Expose the versioned bin dir via /etc/environment (see NOTE above)
  if ! grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then
    echo 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/'"${PG_VERSION}"'/bin"' >/etc/environment
  fi

  cache_installed_version "postgresql" "$PG_VERSION"
  msg_ok "Setup PostgreSQL $PG_VERSION"

  # Optional extension packages, warn-only on failure
  if [[ -n "$PG_MODULES" ]]; then
    local -a mod_list
    IFS=',' read -ra mod_list <<<"$PG_MODULES"
    local pg_mod
    for pg_mod in "${mod_list[@]}"; do
      $STD apt install -y "postgresql-${PG_VERSION}-${pg_mod}" 2>/dev/null || {
        msg_warn "Failed to install PostgreSQL module: ${pg_mod}"
      }
    done
  fi
}

# ------------------------------------------------------------------------------
# Creates PostgreSQL database with user and optional extensions
#
# Description:
#   - Creates PostgreSQL role with login and password
#   - Creates database with UTF8 encoding and template0
#   - Installs optional extensions (postgis, pgvector, etc.)
#   - Configures ALTER ROLE settings for Django/Rails compatibility
#   - Saves credentials to file
#   - Exports variables for use in calling script
#
# Usage:
#   PG_DB_NAME="myapp_db" PG_DB_USER="myapp_user" setup_postgresql_db
#   PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_EXTENSIONS="pgvector" setup_postgresql_db
#
# Variables:
#   PG_DB_NAME - Database name (required)
#   PG_DB_USER - Database user (required)
#   PG_DB_PASS - Database password (optional, auto-generated if empty)
#   PG_DB_EXTENSIONS - Comma-separated list of extensions (optional, e.g. "postgis,pgvector")
#   PG_DB_GRANT_SUPERUSER - Grant SUPERUSER privilege (optional, "true" to enable, security risk!)
#   PG_DB_SCHEMA_PERMS - Grant schema-level permissions (optional, "true" to enable)
#   PG_DB_SKIP_ALTER_ROLE - Skip ALTER ROLE settings (optional, "true" to skip)
#   PG_DB_CREDS_FILE - Credentials file path (optional, default: ~/${APPLICATION}.creds)
#
# Exports:
#   PG_DB_NAME, PG_DB_USER, PG_DB_PASS - For use in calling script
# ------------------------------------------------------------------------------

function setup_postgresql_db() {
  # Validation: both identifiers are mandatory
  if [[ -z "${PG_DB_NAME:-}" || -z "${PG_DB_USER:-}" ]]; then
    msg_error "PG_DB_NAME and PG_DB_USER must be set before calling setup_postgresql_db"
    return 1
  fi

  # Generate a random alphanumeric password if the caller did not supply one
  if [[ -z "${PG_DB_PASS:-}" ]]; then
    PG_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
  fi

  # NOTE(review): PG_DB_NAME/PG_DB_USER/extension names are interpolated into
  # SQL unquoted — callers must pass trusted, valid identifiers only.
  msg_info "Setting up PostgreSQL Database"
  $STD sudo -u postgres psql -c "CREATE ROLE $PG_DB_USER WITH LOGIN PASSWORD '$PG_DB_PASS';"
  $STD sudo -u postgres psql -c "CREATE DATABASE $PG_DB_NAME WITH OWNER $PG_DB_USER ENCODING 'UTF8' TEMPLATE template0;"

  # Install extensions (comma-separated)
  if [[ -n "${PG_DB_EXTENSIONS:-}" ]]; then
    IFS=',' read -ra EXT_LIST <<<"${PG_DB_EXTENSIONS:-}"
    for ext in "${EXT_LIST[@]}"; do
      ext=$(echo "$ext" | xargs) # Trim whitespace
      $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "CREATE EXTENSION IF NOT EXISTS $ext;"
    done
  fi

  # ALTER ROLE settings for Django/Rails compatibility (unless skipped)
  if [[ "${PG_DB_SKIP_ALTER_ROLE:-}" != "true" ]]; then
    $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET client_encoding TO 'utf8';"
    $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET default_transaction_isolation TO 'read committed';"
    $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET timezone TO 'UTC';"
  fi

  # Schema permissions (if requested)
  if [[ "${PG_DB_SCHEMA_PERMS:-}" == "true" ]]; then
    $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME TO $PG_DB_USER;"
    $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER CREATEDB;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT ALL ON SCHEMA public TO $PG_DB_USER;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT CREATE ON SCHEMA public TO $PG_DB_USER;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO $PG_DB_USER;"
    $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO $PG_DB_USER;"
  fi

  # Superuser grant (if requested - WARNING!)
  if [[ "${PG_DB_GRANT_SUPERUSER:-}" == "true" ]]; then
    msg_warn "Granting SUPERUSER privilege (security risk!)"
    $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME to $PG_DB_USER;"
    $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER WITH SUPERUSER;"
  fi

  # Save credentials.
  # FIX: guard ${APPLICATION,,} so an unset APPLICATION does not abort the
  # function under `set -u`; fall back to a generic file name.
  local app_name="${APPLICATION:-postgresql}"
  app_name="${app_name,,}"
  local CREDS_FILE="${PG_DB_CREDS_FILE:-${HOME}/${app_name}.creds}"
  {
    echo "PostgreSQL Credentials"
    echo "Database: $PG_DB_NAME"
    echo "User: $PG_DB_USER"
    echo "Password: $PG_DB_PASS"
  } >>"$CREDS_FILE"
  # FIX: the file holds a plaintext password - restrict it to the owner
  chmod 600 "$CREDS_FILE" 2>/dev/null || true

  msg_ok "Set up PostgreSQL Database"

  # Export for use in calling script
  export PG_DB_NAME
  export PG_DB_USER
  export PG_DB_PASS
}

# ------------------------------------------------------------------------------
# Installs rbenv and ruby-build, installs Ruby and optionally Rails.
#
# Description:
# - Downloads rbenv and ruby-build from GitHub
# - Compiles and installs target Ruby version
# - Optionally installs Rails via gem
#
# Variables:
#   RUBY_VERSION       - Ruby version to install (default: 3.4.4)
#   RUBY_INSTALL_RAILS - true/false to install Rails (default: true)
# ------------------------------------------------------------------------------

function setup_ruby() {
  local RUBY_VERSION="${RUBY_VERSION:-3.4.4}"
  local RUBY_INSTALL_RAILS="${RUBY_INSTALL_RAILS:-true}"
  local RBENV_DIR="$HOME/.rbenv"
  local RBENV_BIN="$RBENV_DIR/bin/rbenv"
  local PROFILE_FILE="$HOME/.profile"
  # Split declaration from assignment so a mktemp failure is not masked.
  local TMP_DIR
  TMP_DIR=$(mktemp -d)

  # Get currently installed Ruby version (rbenv's global setting)
  local CURRENT_RUBY_VERSION=""
  if [[ -x "$RBENV_BIN" ]]; then
    CURRENT_RUBY_VERSION=$("$RBENV_BIN" global 2>/dev/null || echo "")
  fi

  # Scenario 1: Already at correct Ruby version — nothing to do beyond caching.
  if [[ "$CURRENT_RUBY_VERSION" == "$RUBY_VERSION" ]]; then
    msg_info "Update Ruby $RUBY_VERSION"
    cache_installed_version "ruby" "$RUBY_VERSION"
    msg_ok "Update Ruby $RUBY_VERSION"
    return 0
  fi

  # Scenario 2: Different (or no) version - install/upgrade
  if [[ -n "$CURRENT_RUBY_VERSION" ]]; then
    msg_info "Upgrade Ruby from $CURRENT_RUBY_VERSION to $RUBY_VERSION"
  else
    msg_info "Setup Ruby $RUBY_VERSION"
  fi

  ensure_apt_working || return 1

  # Install build dependencies; "a|b" entries are fallbacks tried in order
  # (e.g. libreadline-dev on newer distros, libreadline6-dev on older ones).
  local ruby_deps=()
  local dep_variations=(
    "jq"
    "autoconf"
    "patch"
    "build-essential"
    "libssl-dev"
    "libyaml-dev"
    "libreadline-dev|libreadline6-dev"
    "zlib1g-dev"
    "libgmp-dev"
    "libncurses-dev|libncurses5-dev"
    "libffi-dev"
    "libgdbm-dev"
    "libdb-dev"
    "uuid-dev"
  )

  for dep_pattern in "${dep_variations[@]}"; do
    if [[ "$dep_pattern" == *"|"* ]]; then
      IFS='|' read -ra variations <<<"$dep_pattern"
      for var in "${variations[@]}"; do
        if apt-cache search "^${var}$" 2>/dev/null | grep -q .; then
          ruby_deps+=("$var")
          break
        fi
      done
    else
      if apt-cache search "^${dep_pattern}$" 2>/dev/null | grep -q .; then
        ruby_deps+=("$dep_pattern")
      fi
    fi
  done

  if [[ ${#ruby_deps[@]} -gt 0 ]]; then
    $STD apt install -y "${ruby_deps[@]}" 2>/dev/null || true
  else
    msg_error "No Ruby build dependencies available"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # Download and build rbenv if needed
  if [[ ! -x "$RBENV_BIN" ]]; then
    local RBENV_RELEASE
    local rbenv_json
    rbenv_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/rbenv/releases/latest 2>/dev/null || echo "")

    if [[ -z "$rbenv_json" ]]; then
      msg_error "Failed to fetch latest rbenv version from GitHub"
      rm -rf "$TMP_DIR"
      return 1
    fi

    RBENV_RELEASE=$(echo "$rbenv_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")

    if [[ -z "$RBENV_RELEASE" ]]; then
      msg_error "Could not parse rbenv version from GitHub response"
      rm -rf "$TMP_DIR"
      return 1
    fi

    if ! curl_with_retry "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" "$TMP_DIR/rbenv.tar.gz"; then
      msg_error "Failed to download rbenv"
      rm -rf "$TMP_DIR"
      return 1
    fi

    tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR" || {
      msg_error "Failed to extract rbenv"
      rm -rf "$TMP_DIR"
      return 1
    }

    mkdir -p "$RBENV_DIR"
    cp -r "$TMP_DIR/rbenv-${RBENV_RELEASE}/." "$RBENV_DIR/"
    # Build the (optional but faster) compiled rbenv core in a subshell
    # so the cd does not leak into this function.
    (cd "$RBENV_DIR" && src/configure && $STD make -C src) || {
      msg_error "Failed to build rbenv"
      rm -rf "$TMP_DIR"
      return 1
    }

    # Setup profile so interactive shells pick rbenv up
    if ! grep -q 'rbenv init' "$PROFILE_FILE" 2>/dev/null; then
      echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>"$PROFILE_FILE"
      echo 'eval "$(rbenv init -)"' >>"$PROFILE_FILE"
    fi
  fi

  # Install ruby-build plugin (provides `rbenv install`)
  if [[ ! -d "$RBENV_DIR/plugins/ruby-build" ]]; then
    local RUBY_BUILD_RELEASE
    local ruby_build_json
    ruby_build_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/ruby-build/releases/latest 2>/dev/null || echo "")

    if [[ -z "$ruby_build_json" ]]; then
      msg_error "Failed to fetch latest ruby-build version from GitHub"
      rm -rf "$TMP_DIR"
      return 1
    fi

    RUBY_BUILD_RELEASE=$(echo "$ruby_build_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")

    if [[ -z "$RUBY_BUILD_RELEASE" ]]; then
      msg_error "Could not parse ruby-build version from GitHub response"
      rm -rf "$TMP_DIR"
      return 1
    fi

    if ! curl_with_retry "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" "$TMP_DIR/ruby-build.tar.gz"; then
      msg_error "Failed to download ruby-build"
      rm -rf "$TMP_DIR"
      return 1
    fi

    tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR" || {
      msg_error "Failed to extract ruby-build"
      rm -rf "$TMP_DIR"
      return 1
    }

    mkdir -p "$RBENV_DIR/plugins/ruby-build"
    cp -r "$TMP_DIR/ruby-build-${RUBY_BUILD_RELEASE}/." "$RBENV_DIR/plugins/ruby-build/"
  fi

  # Setup PATH and install the requested Ruby version (compile can take a while)
  export PATH="$RBENV_DIR/bin:$PATH"
  eval "$("$RBENV_BIN" init - bash)" 2>/dev/null || true

  if ! "$RBENV_BIN" versions --bare 2>/dev/null | grep -qx "$RUBY_VERSION"; then
    $STD "$RBENV_BIN" install "$RUBY_VERSION" || {
      msg_error "Failed to install Ruby $RUBY_VERSION"
      rm -rf "$TMP_DIR"
      return 1
    }
  fi

  "$RBENV_BIN" global "$RUBY_VERSION" || {
    msg_error "Failed to set Ruby $RUBY_VERSION as global version"
    rm -rf "$TMP_DIR"
    return 1
  }

  hash -r

  # Install Rails if requested (non-fatal: Ruby itself is already in place)
  if [[ "$RUBY_INSTALL_RAILS" == "true" ]]; then
    $STD gem install rails || {
      msg_warn "Failed to install Rails - Ruby installation successful"
    }
  fi

  rm -rf "$TMP_DIR"
  cache_installed_version "ruby" "$RUBY_VERSION"
  msg_ok "Setup Ruby $RUBY_VERSION"
}

# ------------------------------------------------------------------------------
# Installs or updates MeiliSearch search engine.
#
# Description:
# - Fresh install: Downloads binary, creates config/service, starts
# - Update: Checks for new release, updates binary if available
# - Waits for service to be ready before returning
# - Exports API keys for use by caller
#
# Variables:
#   MEILISEARCH_BIND    - Bind address (default: 127.0.0.1:7700)
#   MEILISEARCH_ENV     - Environment: production/development (default: production)
#   MEILISEARCH_DB_PATH - Database path (default: /var/lib/meilisearch/data)
#
# Exports:
#   MEILISEARCH_MASTER_KEY  - The master key for admin access
#   MEILISEARCH_API_KEY     - The default search API key
#   MEILISEARCH_API_KEY_UID - The UID of the default API key
#
# Example (install script):
#   setup_meilisearch
#
# Example (CT update_script):
#   setup_meilisearch
# ------------------------------------------------------------------------------

function setup_meilisearch() {
  local MEILISEARCH_BIND="${MEILISEARCH_BIND:-127.0.0.1:7700}"
  local MEILISEARCH_ENV="${MEILISEARCH_ENV:-production}"
  local MEILISEARCH_DB_PATH="${MEILISEARCH_DB_PATH:-/var/lib/meilisearch/data}"
  local MEILISEARCH_DUMP_DIR="${MEILISEARCH_DUMP_DIR:-/var/lib/meilisearch/dumps}"
  local MEILISEARCH_SNAPSHOT_DIR="${MEILISEARCH_SNAPSHOT_DIR:-/var/lib/meilisearch/snapshots}"

  # Derive host/port for health checks; 0.0.0.0 is not curl-able, use loopback.
  local MEILISEARCH_HOST="${MEILISEARCH_BIND%%:*}"
  local MEILISEARCH_PORT="${MEILISEARCH_BIND##*:}"
  [[ "$MEILISEARCH_HOST" == "0.0.0.0" ]] && MEILISEARCH_HOST="127.0.0.1"

  # Update mode: MeiliSearch already installed
  if [[ -f /usr/bin/meilisearch ]]; then
    if check_for_gh_release "meilisearch" "meilisearch/meilisearch"; then
      msg_info "Updating MeiliSearch"

      # Get current and new version for compatibility check
      local CURRENT_VERSION NEW_VERSION
      CURRENT_VERSION=$(/usr/bin/meilisearch --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) || CURRENT_VERSION="0.0.0"
      NEW_VERSION="${CHECK_UPDATE_RELEASE#v}"

      # Extract major.minor for comparison (Meilisearch requires dump/restore between minor versions)
      local CURRENT_MAJOR_MINOR NEW_MAJOR_MINOR
      CURRENT_MAJOR_MINOR=$(echo "$CURRENT_VERSION" | cut -d. -f1,2)
      NEW_MAJOR_MINOR=$(echo "$NEW_VERSION" | cut -d. -f1,2)

      # Determine if migration is needed (different major.minor = incompatible DB format)
      local NEEDS_MIGRATION=false
      if [[ "$CURRENT_MAJOR_MINOR" != "$NEW_MAJOR_MINOR" ]]; then
        NEEDS_MIGRATION=true
        msg_info "MeiliSearch version change detected (${CURRENT_VERSION} → ${NEW_VERSION}), preparing data migration"
      fi

      # Read config values for dump/restore
      local MEILI_HOST MEILI_PORT MEILI_MASTER_KEY MEILI_DUMP_DIR
      MEILI_HOST="${MEILISEARCH_HOST:-127.0.0.1}"
      MEILI_PORT="${MEILISEARCH_PORT:-7700}"
      MEILI_DUMP_DIR="${MEILISEARCH_DUMP_DIR:-/var/lib/meilisearch/dumps}"
      MEILI_MASTER_KEY=$(grep -E "^master_key\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ')

      # Create dump before update if migration is needed
      local DUMP_UID=""
      if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -n "$MEILI_MASTER_KEY" ]]; then
        msg_info "Creating MeiliSearch data dump before upgrade"

        # Trigger dump creation
        local DUMP_RESPONSE
        DUMP_RESPONSE=$(curl -s -X POST "http://${MEILI_HOST}:${MEILI_PORT}/dumps" \
          -H "Authorization: Bearer ${MEILI_MASTER_KEY}" \
          -H "Content-Type: application/json" 2>/dev/null) || true

        # The initial response only contains taskUid, not dumpUid.
        # dumpUid is only available after the task completes.
        local TASK_UID
        TASK_UID=$(echo "$DUMP_RESPONSE" | grep -oP '"taskUid":\s*\K[0-9]+' || true)

        if [[ -n "$TASK_UID" ]]; then
          msg_info "Waiting for dump task ${TASK_UID} to complete..."
          local MAX_WAIT=120
          local WAITED=0
          local TASK_RESULT=""

          while [[ $WAITED -lt $MAX_WAIT ]]; do
            TASK_RESULT=$(curl -s "http://${MEILI_HOST}:${MEILI_PORT}/tasks/${TASK_UID}" \
              -H "Authorization: Bearer ${MEILI_MASTER_KEY}" 2>/dev/null) || true

            local TASK_STATUS
            TASK_STATUS=$(echo "$TASK_RESULT" | grep -oP '"status":\s*"\K[^"]+' || true)

            if [[ "$TASK_STATUS" == "succeeded" ]]; then
              # Extract dumpUid from the completed task details
              DUMP_UID=$(echo "$TASK_RESULT" | grep -oP '"dumpUid":\s*"\K[^"]+' || true)
              if [[ -n "$DUMP_UID" ]]; then
                msg_ok "MeiliSearch dump created successfully: ${DUMP_UID}"
              else
                msg_warn "Dump task succeeded but could not extract dumpUid"
              fi
              break
            elif [[ "$TASK_STATUS" == "failed" ]]; then
              local ERROR_MSG
              ERROR_MSG=$(echo "$TASK_RESULT" | grep -oP '"message":\s*"\K[^"]+' || echo "Unknown error")
              msg_warn "MeiliSearch dump failed: ${ERROR_MSG}"
              break
            fi
            sleep 2
            WAITED=$((WAITED + 2))
          done

          if [[ $WAITED -ge $MAX_WAIT ]]; then
            msg_warn "MeiliSearch dump timed out after ${MAX_WAIT}s"
          fi
        else
          msg_warn "Could not trigger MeiliSearch dump (no taskUid in response)"
          msg_info "Response was: ${DUMP_RESPONSE:-empty}"
        fi
      fi

      # If migration is needed but the dump failed: back up the data directory
      # and proceed (allows manual recovery), rather than aborting the update.
      if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -z "$DUMP_UID" ]]; then
        local MEILI_DB_PATH
        MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ')
        MEILI_DB_PATH="${MEILI_DB_PATH:-/var/lib/meilisearch/data}"

        if [[ -d "$MEILI_DB_PATH" ]] && [[ -n "$(ls -A "$MEILI_DB_PATH" 2>/dev/null)" ]]; then
          local BACKUP_PATH="${MEILI_DB_PATH}.backup.$(date +%Y%m%d%H%M%S)"
          msg_warn "Backing up MeiliSearch data to ${BACKUP_PATH}"
          mv "$MEILI_DB_PATH" "$BACKUP_PATH"
          mkdir -p "$MEILI_DB_PATH"
          msg_info "Data backed up. After update, you may need to reindex your data."
          msg_info "Old data is preserved at: ${BACKUP_PATH}"
        fi
      fi

      # Stop service and update binary
      systemctl stop meilisearch
      fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary"

      # If migration needed and dump was created, remove old data and import dump
      if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -n "$DUMP_UID" ]]; then
        local MEILI_DB_PATH
        MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ')
        MEILI_DB_PATH="${MEILI_DB_PATH:-/var/lib/meilisearch/data}"

        msg_info "Removing old MeiliSearch database for migration"
        # ${VAR:?} guard: abort rather than `rm -rf /*` if the path is empty.
        rm -rf "${MEILI_DB_PATH:?}"/*

        # Import dump using the CLI flag (the supported migration method)
        local DUMP_FILE="${MEILI_DUMP_DIR}/${DUMP_UID}.dump"
        if [[ -f "$DUMP_FILE" ]]; then
          msg_info "Importing dump: ${DUMP_FILE}"

          # Start meilisearch with --import-dump: a one-time import during startup
          /usr/bin/meilisearch --config-file-path /etc/meilisearch.toml --import-dump "$DUMP_FILE" &
          local MEILI_PID=$!

          # Wait for meilisearch to become healthy (import happens during startup)
          msg_info "Waiting for MeiliSearch to import and start..."
          local MAX_WAIT=300
          local WAITED=0
          while [[ $WAITED -lt $MAX_WAIT ]]; do
            if curl -sf "http://${MEILI_HOST}:${MEILI_PORT}/health" &>/dev/null; then
              msg_ok "MeiliSearch is healthy after import"
              break
            fi
            # Check if process is still running
            if ! kill -0 $MEILI_PID 2>/dev/null; then
              msg_warn "MeiliSearch process exited during import"
              break
            fi
            sleep 3
            WAITED=$((WAITED + 3))
          done

          # Stop the manual process, then hand over to systemd for management
          kill $MEILI_PID 2>/dev/null || true
          sleep 2

          systemctl start meilisearch

          if systemctl is-active --quiet meilisearch; then
            msg_ok "MeiliSearch migrated successfully"
          else
            msg_warn "MeiliSearch failed to start after migration - check logs with: journalctl -u meilisearch"
          fi
        else
          msg_warn "Dump file not found: ${DUMP_FILE}"
          systemctl start meilisearch
        fi
      else
        systemctl start meilisearch
      fi

      msg_ok "Updated MeiliSearch"
    fi
    return 0
  fi

  # Fresh install
  msg_info "Setup MeiliSearch"

  # Install binary
  fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary" || {
    msg_error "Failed to install MeiliSearch binary"
    return 1
  }

  # Download default config
  curl -fsSL https://raw.githubusercontent.com/meilisearch/meilisearch/latest/config.toml -o /etc/meilisearch.toml || {
    msg_error "Failed to download MeiliSearch config"
    return 1
  }

  # Generate master key
  MEILISEARCH_MASTER_KEY=$(openssl rand -base64 12)
  export MEILISEARCH_MASTER_KEY

  # Configure
  sed -i \
    -e "s|^env =.*|env = \"${MEILISEARCH_ENV}\"|" \
    -e "s|^# master_key =.*|master_key = \"${MEILISEARCH_MASTER_KEY}\"|" \
    -e "s|^db_path =.*|db_path = \"${MEILISEARCH_DB_PATH}\"|" \
    -e "s|^dump_dir =.*|dump_dir = \"${MEILISEARCH_DUMP_DIR}\"|" \
    -e "s|^snapshot_dir =.*|snapshot_dir = \"${MEILISEARCH_SNAPSHOT_DIR}\"|" \
    -e 's|^# no_analytics = true|no_analytics = true|' \
    -e "s|^http_addr =.*|http_addr = \"${MEILISEARCH_BIND}\"|" \
    /etc/meilisearch.toml

  # Create data directories
  mkdir -p "${MEILISEARCH_DB_PATH}" "${MEILISEARCH_DUMP_DIR}" "${MEILISEARCH_SNAPSHOT_DIR}"

  # Create systemd service.
  # FIX: was `cat </etc/systemd/system/meilisearch.service` (input redirection,
  # which READS the file) — the heredoc write redirection had been lost.
  cat <<EOF >/etc/systemd/system/meilisearch.service
[Unit]
Description=Meilisearch
After=network.target

[Service]
ExecStart=/usr/bin/meilisearch --config-file-path /etc/meilisearch.toml
Restart=always

[Install]
WantedBy=multi-user.target
EOF

  # Enable and start service
  systemctl daemon-reload
  systemctl enable -q --now meilisearch

  # Wait for MeiliSearch to be ready (up to 30 seconds)
  for i in {1..30}; do
    if curl -s -o /dev/null -w "%{http_code}" "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/health" 2>/dev/null | grep -q "200"; then
      break
    fi
    sleep 1
  done

  # Verify service is running
  if ! systemctl is-active --quiet meilisearch; then
    msg_error "MeiliSearch service failed to start"
    return 1
  fi

  # Get API keys with retry logic
  MEILISEARCH_API_KEY=""
  for i in {1..10}; do
    MEILISEARCH_API_KEY=$(curl -s -X GET "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/keys" \
      -H "Authorization: Bearer ${MEILISEARCH_MASTER_KEY}" 2>/dev/null |
      grep -o '"key":"[^"]*"' | head -n 1 | sed 's/"key":"//;s/"//') || true
    [[ -n "$MEILISEARCH_API_KEY" ]] && break
    sleep 2
  done

  MEILISEARCH_API_KEY_UID=$(curl -s -X GET "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/keys" \
    -H "Authorization: Bearer ${MEILISEARCH_MASTER_KEY}" 2>/dev/null |
    grep -o '"uid":"[^"]*"' | head -n 1 | sed 's/"uid":"//;s/"//') || true

  export MEILISEARCH_API_KEY
  export MEILISEARCH_API_KEY_UID

  # Cache version
  local MEILISEARCH_VERSION
  MEILISEARCH_VERSION=$(/usr/bin/meilisearch --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) || true
  cache_installed_version "meilisearch" "${MEILISEARCH_VERSION:-unknown}"

  msg_ok "Setup MeiliSearch ${MEILISEARCH_VERSION:-}"
}

# ------------------------------------------------------------------------------
# Installs or upgrades ClickHouse database server.
#
# Description:
# - Adds ClickHouse official repository
# - Installs specified version
# - Configures systemd service
# - Supports Debian/Ubuntu with fallback mechanism
#
# Variables:
#   CLICKHOUSE_VERSION - ClickHouse version to install (default: latest)
# ------------------------------------------------------------------------------

function setup_clickhouse() {
  local CLICKHOUSE_VERSION="${CLICKHOUSE_VERSION:-latest}"
  local DISTRO_ID DISTRO_CODENAME
  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

  # Ensure non-interactive mode for all apt operations
  export DEBIAN_FRONTEND=noninteractive
  export NEEDRESTART_MODE=a
  export NEEDRESTART_SUSPEND=1

  # Resolve "latest" version from the package server, falling back to GitHub
  if [[ "$CLICKHOUSE_VERSION" == "latest" ]]; then
    CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://packages.clickhouse.com/tgz/stable/ 2>/dev/null |
      grep -oP 'clickhouse-common-static-\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' |
      sort -V | tail -n1 || echo "")

    # Fallback to GitHub API if package server failed
    if [[ -z "$CLICKHOUSE_VERSION" ]]; then
      CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://api.github.com/repos/ClickHouse/ClickHouse/releases/latest 2>/dev/null |
        grep -oP '"tag_name":\s*"v\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1 || echo "")
    fi

    [[ -z "$CLICKHOUSE_VERSION" ]] && {
      msg_error "Could not determine latest ClickHouse version from any source"
      return 1
    }
  fi

  # Get currently installed version
  local CURRENT_VERSION=""
  if command -v clickhouse-server >/dev/null 2>&1; then
    CURRENT_VERSION=$(clickhouse-server --version 2>/dev/null | grep -oP 'version \K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1)
  fi

  # Scenario 1: Already at target version - just update packages
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$CLICKHOUSE_VERSION" ]]; then
    msg_info "Update ClickHouse $CLICKHOUSE_VERSION"
    ensure_apt_working || return 1

    # Perform upgrade with retry logic (non-fatal if fails)
    upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || {
      msg_warn "ClickHouse package upgrade had issues, continuing with current version"
    }
    cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
    msg_ok "Update ClickHouse $CLICKHOUSE_VERSION"
    return 0
  fi

  # Scenario 2: Different version - clean upgrade
  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$CLICKHOUSE_VERSION" ]]; then
    msg_info "Upgrade ClickHouse from $CURRENT_VERSION to $CLICKHOUSE_VERSION"
    stop_all_services "clickhouse-server"
    remove_old_tool_version "clickhouse"
  else
    msg_info "Setup ClickHouse $CLICKHOUSE_VERSION"
  fi

  ensure_dependencies apt-transport-https ca-certificates dirmngr gnupg

  # Prepare repository (cleanup + validation)
  prepare_repository_setup "clickhouse" || {
    msg_error "Failed to prepare ClickHouse repository"
    return 1
  }

  # Setup repository (ClickHouse uses 'stable' suite)
  setup_deb822_repo \
    "clickhouse" \
    "https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key" \
    "https://packages.clickhouse.com/deb" \
    "stable" \
    "main"

  # Install packages with retry logic
  $STD apt update || {
    msg_error "APT update failed for ClickHouse repository"
    return 1
  }

  install_packages_with_retry "clickhouse-server" "clickhouse-client" || {
    msg_error "Failed to install ClickHouse packages"
    return 1
  }

  # Verify installation
  if ! command -v clickhouse-server >/dev/null 2>&1; then
    msg_error "ClickHouse installation completed but clickhouse-server command not found"
    return 1
  fi

  # Setup data directory
  mkdir -p /var/lib/clickhouse
  if id clickhouse >/dev/null 2>&1; then
    chown -R clickhouse:clickhouse /var/lib/clickhouse
  fi

  # Enable and start service
  $STD systemctl enable clickhouse-server || {
    msg_warn "Failed to enable clickhouse-server service"
  }
  safe_service_restart clickhouse-server || true

  cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
  msg_ok "Setup ClickHouse $CLICKHOUSE_VERSION"
}

# ------------------------------------------------------------------------------
# Installs Rust toolchain and optional global crates via cargo.
#
# Description:
# - Installs rustup (if missing)
# - Installs or updates desired Rust toolchain (stable, nightly, or versioned)
# - Installs or updates specified global crates using `cargo install`
#
# Notes:
# - Skips crate install if exact version is already present
# - Updates crate if newer version or different version is requested
#
# Variables:
#   RUST_TOOLCHAIN - Rust toolchain to install (default: stable)
#   RUST_CRATES    - Comma-separated list of crates (e.g. "cargo-edit,wasm-pack@0.12.1")
# ------------------------------------------------------------------------------

function setup_rust() {
  local RUST_TOOLCHAIN="${RUST_TOOLCHAIN:-stable}"
  local RUST_CRATES="${RUST_CRATES:-}"
  local CARGO_BIN="${HOME}/.cargo/bin"

  # Get currently installed version
  local CURRENT_VERSION=""
  if command -v rustc &>/dev/null; then
    CURRENT_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true
  fi

  # Scenario 1: Rustup not installed - fresh install
  if ! command -v rustup &>/dev/null; then
    msg_info "Setup Rust ($RUST_TOOLCHAIN)"
    curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN" || {
      msg_error "Failed to install Rust"
      return 1
    }
    export PATH="$CARGO_BIN:$PATH"
    echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile"

    # Verify installation
    if ! command -v rustc >/dev/null 2>&1; then
      msg_error "Rust binary not found after installation"
      return 1
    fi

    local RUST_VERSION
    RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true
    if [[ -z "$RUST_VERSION" ]]; then
      msg_error "Failed to determine Rust version"
      return 1
    fi

    cache_installed_version "rust" "$RUST_VERSION"
    msg_ok "Setup Rust $RUST_VERSION"
  else
    # Scenario 2: Rustup already installed - update/maintain
    msg_info "Update Rust ($RUST_TOOLCHAIN)"

    # Ensure default toolchain is set
    $STD rustup default "$RUST_TOOLCHAIN" 2>/dev/null || {
      # If default fails, install the toolchain first
      $STD rustup install "$RUST_TOOLCHAIN" || {
        msg_error "Failed to install Rust toolchain $RUST_TOOLCHAIN"
        return 1
      }
      $STD rustup default "$RUST_TOOLCHAIN" || {
        msg_error "Failed to set default Rust toolchain"
        return 1
      }
    }

    # FIX: this was one corrupted line merging `rustup update` with the version
    # capture (`rustup update "$RUST_TOOLCHAIN" /dev/null | awk ...`); restore
    # the two separate statements: update first, then query rustc.
    $STD rustup update "$RUST_TOOLCHAIN"

    local RUST_VERSION
    RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true
    if [[ -z "$RUST_VERSION" ]]; then
      msg_error "Failed to determine Rust version after update"
      return 1
    fi

    cache_installed_version "rust" "$RUST_VERSION"
    msg_ok "Update Rust $RUST_VERSION"
  fi

  # Install global crates (entries may pin a version as name@version)
  if [[ -n "$RUST_CRATES" ]]; then
    msg_info "Processing Rust crates: $RUST_CRATES"
    IFS=',' read -ra CRATES <<<"$RUST_CRATES"
    for crate in "${CRATES[@]}"; do
      crate=$(echo "$crate" | xargs) # trim whitespace
      [[ -z "$crate" ]] && continue  # skip empty entries

      local NAME VER INSTALLED_VER CRATE_LIST
      if [[ "$crate" == *"@"* ]]; then
        NAME="${crate%@*}"
        VER="${crate##*@}"
      else
        NAME="$crate"
        VER=""
      fi

      # Get list of installed crates once per crate entry
      CRATE_LIST=$(cargo install --list 2>/dev/null || echo "")

      # Check if already installed
      if echo "$CRATE_LIST" | grep -q "^${NAME} "; then
        INSTALLED_VER=$(echo "$CRATE_LIST" | grep "^${NAME} " | head -1 | awk '{print $2}' 2>/dev/null | tr -d 'v:' || echo '')

        if [[ -n "$VER" && "$VER" != "$INSTALLED_VER" ]]; then
          msg_info "Upgrading $NAME from v$INSTALLED_VER to v$VER"
          $STD cargo install "$NAME" --version "$VER" --force || {
            msg_error "Failed to install $NAME@$VER"
            return 1
          }
          msg_ok "Upgraded $NAME to v$VER"
        elif [[ -z "$VER" ]]; then
          msg_info "Upgrading $NAME to latest"
          $STD cargo install "$NAME" --force || {
            msg_error "Failed to upgrade $NAME"
            return 1
          }
          local NEW_VER
          NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' 2>/dev/null | tr -d 'v:' || echo 'unknown')
          msg_ok "Upgraded $NAME to v$NEW_VER"
        else
          msg_ok "$NAME v$INSTALLED_VER already installed"
        fi
      else
        msg_info "Installing $NAME${VER:+@$VER}"
        if [[ -n "$VER" ]]; then
          $STD cargo install "$NAME" --version "$VER" || {
            msg_error "Failed to install $NAME@$VER"
            return 1
          }
          msg_ok "Installed $NAME v$VER"
        else
          $STD cargo install "$NAME" || {
            msg_error "Failed to install $NAME"
            return 1
          }
          local NEW_VER
          NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' 2>/dev/null | tr -d 'v:' || echo 'unknown')
          msg_ok "Installed $NAME v$NEW_VER"
        fi
      fi
    done
    msg_ok "Processed Rust crates"
  fi
}

# ------------------------------------------------------------------------------
# Installs or upgrades uv (Python package manager) from GitHub releases.
# - Downloads platform-specific tarball (no install.sh!)
+# - Extracts uv binary +# - Places it in /usr/local/bin +# - Optionally installs a specific Python version via uv +# ------------------------------------------------------------------------------ + +function setup_uv() { + local UV_BIN="/usr/local/bin/uv" + local UVX_BIN="/usr/local/bin/uvx" + local TMP_DIR=$(mktemp -d) + local CACHED_VERSION + + # trap for TMP Cleanup + trap "rm -rf '$TMP_DIR'" EXIT + + CACHED_VERSION=$(get_cached_version "uv") + + # Architecture Detection + local ARCH=$(uname -m) + local OS_TYPE="" + local UV_TAR="" + + if grep -qi "alpine" /etc/os-release; then + OS_TYPE="musl" + else + OS_TYPE="gnu" + fi + + case "$ARCH" in + x86_64) + UV_TAR="uv-x86_64-unknown-linux-${OS_TYPE}.tar.gz" + ;; + aarch64) + UV_TAR="uv-aarch64-unknown-linux-${OS_TYPE}.tar.gz" + ;; + i686) + UV_TAR="uv-i686-unknown-linux-${OS_TYPE}.tar.gz" + ;; + *) + msg_error "Unsupported architecture: $ARCH (supported: x86_64, aarch64, i686)" + return 1 + ;; + esac + + ensure_dependencies jq + + # Fetch latest version + local releases_json + releases_json=$(curl -fsSL --max-time 15 \ + "https://api.github.com/repos/astral-sh/uv/releases/latest" 2>/dev/null || echo "") + + if [[ -z "$releases_json" ]]; then + msg_error "Could not fetch latest uv version from GitHub API" + return 1 + fi + + local LATEST_VERSION + LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//') + + if [[ -z "$LATEST_VERSION" ]]; then + msg_error "Could not parse uv version from GitHub API response" + return 1 + fi + + # Get currently installed version + local INSTALLED_VERSION="" + if [[ -x "$UV_BIN" ]]; then + INSTALLED_VERSION=$("$UV_BIN" --version 2>/dev/null | awk '{print $2}') + fi + + # Scenario 1: Already at latest version + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then + cache_installed_version "uv" "$LATEST_VERSION" + + # Check if uvx is needed and missing + if [[ "${USE_UVX:-NO}" == "YES" ]] && [[ ! 
-x "$UVX_BIN" ]]; then + msg_info "Installing uvx wrapper" + _install_uvx_wrapper || return 1 + msg_ok "uvx wrapper installed" + fi + + return 0 + fi + + # Scenario 2: New install or upgrade + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then + msg_info "Upgrade uv from $INSTALLED_VERSION to $LATEST_VERSION" + else + msg_info "Setup uv $LATEST_VERSION" + fi + + local UV_URL="https://github.com/astral-sh/uv/releases/download/${LATEST_VERSION}/${UV_TAR}" + + if ! curl_with_retry "$UV_URL" "$TMP_DIR/uv.tar.gz"; then + msg_error "Failed to download uv from $UV_URL" + return 1 + fi + + # Extract + $STD tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract uv" + return 1 + } + + # Find and install uv binary (tarball extracts to uv-VERSION-ARCH/ directory) + local UV_BINARY=$(find "$TMP_DIR" -name "uv" -type f -executable | head -n1) + if [[ ! -f "$UV_BINARY" ]]; then + msg_error "Could not find uv binary in extracted tarball" + return 1 + fi + + $STD install -m 755 "$UV_BINARY" "$UV_BIN" || { + msg_error "Failed to install uv binary" + return 1 + } + + ensure_usr_local_bin_persist + export PATH="/usr/local/bin:$PATH" + + # Optional: Install uvx wrapper + if [[ "${USE_UVX:-NO}" == "YES" ]]; then + msg_info "Installing uvx wrapper" + _install_uvx_wrapper || { + msg_error "Failed to install uvx wrapper" + return 1 + } + msg_ok "uvx wrapper installed" + fi + + # Optional: Generate shell completions + $STD uv generate-shell-completion bash >/etc/bash_completion.d/uv 2>/dev/null || true + if [[ -d /usr/share/zsh/site-functions ]]; then + $STD uv generate-shell-completion zsh >/usr/share/zsh/site-functions/_uv 2>/dev/null || true + fi + + # Optional: Install specific Python version if requested + if [[ -n "${PYTHON_VERSION:-}" ]]; then + msg_info "Installing Python $PYTHON_VERSION via uv" + $STD uv python install "$PYTHON_VERSION" || { + msg_error "Failed to install Python $PYTHON_VERSION" + return 1 + } + msg_ok 
"Python $PYTHON_VERSION installed" + fi + + cache_installed_version "uv" "$LATEST_VERSION" + msg_ok "Setup uv $LATEST_VERSION" +} + +# Helper function to install uvx wrapper +_install_uvx_wrapper() { + local UVX_BIN="/usr/local/bin/uvx" + + cat >"$UVX_BIN" <<'EOF' +#!/bin/bash +# uvx - Run Python applications from PyPI as command-line tools +# Wrapper for: uv tool run +exec /usr/local/bin/uv tool run "$@" +EOF + + chmod +x "$UVX_BIN" + return 0 +} + +# ------------------------------------------------------------------------------ +# Installs or updates yq (mikefarah/yq - Go version). +# +# Description: +# - Checks if yq is installed and from correct source +# - Compares with latest release on GitHub +# - Updates if outdated or wrong implementation +# ------------------------------------------------------------------------------ + +function setup_yq() { + local TMP_DIR=$(mktemp -d) + local BINARY_PATH="/usr/local/bin/yq" + local GITHUB_REPO="mikefarah/yq" + + ensure_dependencies jq + ensure_usr_local_bin_persist + + # Remove non-mikefarah implementations + if command -v yq &>/dev/null; then + if ! 
# ------------------------------------------------------------------------------
# Installs or updates yq (mikefarah/yq - Go version).
#
# Description:
# - Removes non-mikefarah yq implementations found on PATH
# - Compares the installed version with the latest GitHub release
# - Downloads the release binary matching the detected CPU architecture
#
# Returns: 0 on success / already-current, 1 on download or API failure.
# ------------------------------------------------------------------------------

function setup_yq() {
  # Split declaration from assignment so a mktemp failure is not masked.
  local TMP_DIR
  TMP_DIR=$(mktemp -d) || {
    msg_error "Failed to create temporary directory"
    return 1
  }
  local BINARY_PATH="/usr/local/bin/yq"
  local GITHUB_REPO="mikefarah/yq"

  # Map the machine type onto the suffix used by mikefarah/yq release assets
  # (previously hard-coded to amd64, which broke arm64/armhf containers).
  local YQ_ARCH
  case "$(uname -m)" in
  x86_64) YQ_ARCH="amd64" ;;
  aarch64 | arm64) YQ_ARCH="arm64" ;;
  armv7l | armv6l | armhf) YQ_ARCH="arm" ;;
  i386 | i686) YQ_ARCH="386" ;;
  *)
    msg_error "Unsupported architecture for yq: $(uname -m)"
    rm -rf "$TMP_DIR"
    return 1
    ;;
  esac

  ensure_dependencies jq
  ensure_usr_local_bin_persist

  # Remove non-mikefarah implementations (e.g. the Python wrapper "yq") so
  # the Go binary installed below is the one found on PATH.
  if command -v yq &>/dev/null; then
    if ! yq --version 2>&1 | grep -q 'mikefarah'; then
      rm -f "$(command -v yq)"
    fi
  fi

  local LATEST_VERSION
  local releases_json
  releases_json=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" 2>/dev/null || echo "")

  if [[ -z "$releases_json" ]]; then
    msg_error "Could not fetch latest yq version from GitHub API"
    rm -rf "$TMP_DIR"
    return 1
  fi

  LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")

  if [[ -z "$LATEST_VERSION" ]]; then
    msg_error "Could not parse yq version from GitHub API response"
    rm -rf "$TMP_DIR"
    return 1
  fi

  # Get currently installed version (only counts if it is the Go yq).
  local INSTALLED_VERSION=""
  if command -v yq &>/dev/null && yq --version 2>&1 | grep -q 'mikefarah'; then
    INSTALLED_VERSION=$(yq --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//')
  fi

  # Scenario 1: Already at latest version
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
    cache_installed_version "yq" "$LATEST_VERSION"
    rm -rf "$TMP_DIR"
    return 0
  fi

  # Scenario 2: New install or upgrade
  if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then
    msg_info "Upgrade yq from $INSTALLED_VERSION to $LATEST_VERSION"
  else
    msg_info "Setup yq $LATEST_VERSION"
  fi

  if ! curl_with_retry "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_${YQ_ARCH}" "$TMP_DIR/yq"; then
    msg_error "Failed to download yq"
    rm -rf "$TMP_DIR"
    return 1
  fi

  chmod +x "$TMP_DIR/yq"
  mv "$TMP_DIR/yq" "$BINARY_PATH" || {
    msg_error "Failed to install yq"
    rm -rf "$TMP_DIR"
    return 1
  }

  rm -rf "$TMP_DIR"
  hash -r

  # Report the version of the binary actually on disk, not the API answer.
  local FINAL_VERSION
  FINAL_VERSION=$("$BINARY_PATH" --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//')
  cache_installed_version "yq" "$FINAL_VERSION"
  msg_ok "Setup yq $FINAL_VERSION"
}
# Write /etc/docker/daemon.json with the configured log driver, unless the
# file already exists (never clobber an operator-managed configuration).
# Reads: DOCKER_LOG_DRIVER (optional, default "journald").
_docker_write_daemon_json() {
  local log_driver="${DOCKER_LOG_DRIVER:-journald}"
  mkdir -p /etc/docker
  if [ ! -f /etc/docker/daemon.json ]; then
    # NOTE: original used `cat </etc/docker/daemon.json`, which *reads* the
    # (nonexistent) file instead of writing the heredoc — fixed here.
    cat <<EOF >/etc/docker/daemon.json
{
  "log-driver": "$log_driver"
}
EOF
  fi
}

# ------------------------------------------------------------------------------
# Installs or updates Docker Engine, optionally Portainer CE, and offers an
# interactive update pass over running containers.
#
# Variables:
#   USE_DOCKER_REPO     - "true"/"1" to use the official Docker apt repo
#                         (default: distro docker.io package)
#   DOCKER_PORTAINER    - "true" to install/update Portainer CE
#   DOCKER_LOG_DRIVER   - daemon.json log driver (default: journald)
#   DOCKER_SKIP_UPDATES - "true" to skip the container update check
# ------------------------------------------------------------------------------
function setup_docker() {
  local docker_installed=false
  local portainer_installed=false
  local USE_DOCKER_REPO="${USE_DOCKER_REPO:-false}"

  # Detect an existing Docker Engine (any packaging source).
  if command -v docker &>/dev/null; then
    docker_installed=true
    DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
    msg_info "Docker $DOCKER_CURRENT_VERSION detected"
  fi

  # Detect a running Portainer container by its conventional name.
  if docker ps --format '{{.Names}}' 2>/dev/null | grep -q '^portainer$'; then
    portainer_installed=true
    msg_info "Portainer container detected"
  fi

  # Scenario 1: Use distro repository (default, most stable)
  if [[ "$USE_DOCKER_REPO" != "true" && "$USE_DOCKER_REPO" != "TRUE" && "$USE_DOCKER_REPO" != "1" ]]; then

    if [ "$docker_installed" = true ]; then
      msg_info "Checking for Docker updates (distro package)"
      ensure_apt_working || return 1
      upgrade_packages_with_retry "docker.io" "docker-compose" || true
      DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
      msg_ok "Docker is up-to-date ($DOCKER_CURRENT_VERSION)"
    else
      msg_info "Installing Docker (distro package)"
      ensure_apt_working || return 1

      if ! install_packages_with_retry "docker.io"; then
        msg_error "Failed to install docker.io from distro repository"
        return 1
      fi
      # docker-compose is optional; best-effort install.
      $STD apt install -y docker-compose 2>/dev/null || true

      DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
      msg_ok "Installed Docker $DOCKER_CURRENT_VERSION (distro package)"
    fi

    _docker_write_daemon_json
    systemctl enable -q --now docker

    # Continue to Portainer section below
  else
    # Scenario 2: Use official Docker repository (USE_DOCKER_REPO=true)

    # Cleanup old (non-deb822) repository configurations
    if [ -f /etc/apt/sources.list.d/docker.list ]; then
      msg_info "Migrating from old Docker repository format"
      rm -f /etc/apt/sources.list.d/docker.list
      rm -f /etc/apt/keyrings/docker.asc
    fi

    msg_info "Setting up Docker Repository"
    setup_deb822_repo \
      "docker" \
      "https://download.docker.com/linux/$(get_os_info id)/gpg" \
      "https://download.docker.com/linux/$(get_os_info id)" \
      "$(get_os_info codename)" \
      "stable" \
      "$(dpkg --print-architecture)"

    if [ "$docker_installed" = true ]; then
      msg_info "Checking for Docker updates"
      # stderr silenced on apt-cache (the stage that can fail), not on awk.
      DOCKER_LATEST_VERSION=$(apt-cache policy docker-ce 2>/dev/null | grep Candidate | awk '{print $2}' | cut -d':' -f2 | cut -d'-' -f1 || echo '')

      if [ "$DOCKER_CURRENT_VERSION" != "$DOCKER_LATEST_VERSION" ]; then
        msg_info "Updating Docker $DOCKER_CURRENT_VERSION → $DOCKER_LATEST_VERSION"
        $STD apt install -y --only-upgrade \
          docker-ce \
          docker-ce-cli \
          containerd.io \
          docker-buildx-plugin \
          docker-compose-plugin || {
          msg_error "Failed to update Docker packages"
          return 1
        }
        msg_ok "Updated Docker to $DOCKER_LATEST_VERSION"
      else
        msg_ok "Docker is up-to-date ($DOCKER_CURRENT_VERSION)"
      fi
    else
      msg_info "Installing Docker"
      $STD apt install -y \
        docker-ce \
        docker-ce-cli \
        containerd.io \
        docker-buildx-plugin \
        docker-compose-plugin || {
        msg_error "Failed to install Docker packages"
        return 1
      }

      DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
      msg_ok "Installed Docker $DOCKER_CURRENT_VERSION"
    fi

    _docker_write_daemon_json
    systemctl enable -q --now docker
  fi

  # Portainer Management (common for both modes)
  if [[ "${DOCKER_PORTAINER:-}" == "true" ]]; then
    if [ "$portainer_installed" = true ]; then
      msg_info "Checking for Portainer updates"
      PORTAINER_CURRENT=$(docker inspect portainer --format='{{.Config.Image}}' 2>/dev/null | cut -d':' -f2)
      PORTAINER_LATEST=$(curl -fsSL https://registry.hub.docker.com/v2/repositories/portainer/portainer-ce/tags?page_size=100 | grep -oP '"name":"\K[0-9]+\.[0-9]+\.[0-9]+"' | head -1 | tr -d '"')

      if [ "$PORTAINER_CURRENT" != "$PORTAINER_LATEST" ]; then
        read -r -p "${TAB3}Update Portainer $PORTAINER_CURRENT → $PORTAINER_LATEST? " prompt
        if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
          msg_info "Updating Portainer"
          docker stop portainer
          docker rm portainer
          docker pull portainer/portainer-ce:latest
          docker run -d \
            -p 9000:9000 \
            -p 9443:9443 \
            --name=portainer \
            --restart=always \
            -v /var/run/docker.sock:/var/run/docker.sock \
            -v portainer_data:/data \
            portainer/portainer-ce:latest
          msg_ok "Updated Portainer to $PORTAINER_LATEST"
        fi
      else
        msg_ok "Portainer is up-to-date ($PORTAINER_CURRENT)"
      fi
    else
      msg_info "Installing Portainer"
      docker volume create portainer_data
      docker run -d \
        -p 9000:9000 \
        -p 9443:9443 \
        --name=portainer \
        --restart=always \
        -v /var/run/docker.sock:/var/run/docker.sock \
        -v portainer_data:/data \
        portainer/portainer-ce:latest

      LOCAL_IP=$(hostname -I | awk '{print $1}')
      msg_ok "Installed Portainer (http://${LOCAL_IP}:9000)"
    fi
  fi

  # Interactive Container Update Check
  if [[ "${DOCKER_SKIP_UPDATES:-}" != "true" ]] && [ "$docker_installed" = true ]; then
    msg_info "Checking for container updates"

    # Compare each running container's image digest against the freshly
    # pulled image to find stale containers.
    local containers_with_updates=()
    local container_info=()
    local index=1

    while IFS= read -r container; do
      local name=$(echo "$container" | awk '{print $1}')
      local image=$(echo "$container" | awk '{print $2}')
      local current_digest=$(docker inspect "$name" --format='{{.Image}}' 2>/dev/null | cut -d':' -f2 | cut -c1-12)

      docker pull "$image" >/dev/null 2>&1
      local latest_digest=$(docker inspect "$image" --format='{{.Id}}' 2>/dev/null | cut -d':' -f2 | cut -c1-12)

      if [ "$current_digest" != "$latest_digest" ]; then
        containers_with_updates+=("$name")
        container_info+=("${index}) ${name} (${image})")
        # Plain arithmetic assignment: ((index++)) returns the pre-increment
        # value and would trip `set -e` when that value is 0.
        index=$((index + 1))
      fi
    done < <(docker ps --format '{{.Names}} {{.Image}}')

    if [ ${#containers_with_updates[@]} -gt 0 ]; then
      echo ""
      echo "${TAB3}Container updates available:"
      for info in "${container_info[@]}"; do
        echo "${TAB3}  $info"
      done
      echo ""
      read -r -p "${TAB3}Select containers to update (e.g., 1,3,5 or 'all' or 'none'): " selection

      if [[ ${selection,,} == "all" ]]; then
        for container in "${containers_with_updates[@]}"; do
          msg_info "Updating container: $container"
          docker stop "$container"
          docker rm "$container"
          # Note: This requires the original docker run command - best to recreate via compose
          msg_ok "Stopped and removed $container (please recreate with updated image)"
        done
      elif [[ ${selection,,} != "none" ]]; then
        IFS=',' read -ra SELECTED <<<"$selection"
        for num in "${SELECTED[@]}"; do
          num=$(echo "$num" | xargs) # trim whitespace
          if [[ "$num" =~ ^[0-9]+$ ]] && [ "$num" -ge 1 ] && [ "$num" -le "${#containers_with_updates[@]}" ]; then
            container="${containers_with_updates[$((num - 1))]}"
            msg_info "Updating container: $container"
            docker stop "$container"
            docker rm "$container"
            msg_ok "Stopped and removed $container (please recreate with updated image)"
          fi
        done
      fi
    else
      msg_ok "All containers are up-to-date"
    fi
  fi

  msg_ok "Docker setup completed"
}
# ------------------------------------------------------------------------------
# Fetch and deploy from URL
# Downloads an archive (zip, tar[.gz/.xz/.bz2/.zst], or .deb) from a URL and
# extracts/installs it.
#
# Usage: fetch_and_deploy_from_url "url" "directory"
#   url       - URL to the archive
#   directory - Destination path for extraction (not used for .deb packages)
#
# Environment:
#   CLEAN_INSTALL=1 - wipe the destination directory before deploying
#
# Returns: 0 on success, 1 on download/detection/extraction failure.
# ------------------------------------------------------------------------------
function fetch_and_deploy_from_url() {
  local url="$1"
  local directory="${2:-}"

  if [[ -z "$url" ]]; then
    msg_error "URL parameter is required"
    return 1
  fi

  local filename="${url##*/}"

  msg_info "Downloading from $url"

  local tmpdir
  tmpdir=$(mktemp -d) || {
    msg_error "Failed to create temporary directory"
    return 1
  }

  curl -fsSL -o "$tmpdir/$filename" "$url" || {
    msg_error "Download failed: $url"
    rm -rf "$tmpdir"
    return 1
  }

  # `file` is not guaranteed in minimal containers; install on demand so the
  # content-based type detection below cannot fail on a missing tool.
  ensure_dependencies file

  # Auto-detect archive type from content, not the file extension.
  local file_desc
  file_desc=$(file -b "$tmpdir/$filename")

  local archive_type="unknown"

  if [[ "$file_desc" =~ gzip.*compressed|gzip\ compressed\ data ]]; then
    archive_type="tar"
  elif [[ "$file_desc" =~ XZ\ compressed|bzip2\ compressed|Zstandard\ compressed ]]; then
    # tar auto-detects the compressor on extraction, so these are handled
    # the same way as gzip tarballs.
    archive_type="tar"
  elif [[ "$file_desc" =~ Zip.*archive|ZIP\ archive ]]; then
    archive_type="zip"
  elif [[ "$file_desc" =~ Debian.*package|Debian\ binary\ package ]]; then
    archive_type="deb"
  elif [[ "$file_desc" =~ POSIX.*tar.*archive|tar\ archive ]]; then
    archive_type="tar"
  else
    msg_error "Unsupported or unknown archive type: $file_desc"
    rm -rf "$tmpdir"
    return 1
  fi

  msg_info "Detected archive type: $archive_type (file type: $file_desc)"

  if [[ "$archive_type" == "deb" ]]; then
    msg_info "Installing .deb package"

    # apt needs the file readable by its sandboxed _apt user.
    chmod 644 "$tmpdir/$filename"
    $STD apt install -y "$tmpdir/$filename" || {
      $STD dpkg -i "$tmpdir/$filename" || {
        msg_error "Both apt and dpkg installation failed"
        rm -rf "$tmpdir"
        return 1
      }
    }

    rm -rf "$tmpdir"
    msg_ok "Successfully installed .deb package"
    return 0
  fi

  if [[ -z "$directory" ]]; then
    msg_error "Directory parameter is required for archive extraction"
    rm -rf "$tmpdir"
    return 1
  fi

  msg_info "Extracting archive to $directory"

  mkdir -p "$directory"

  # ${directory:?} guard: never expand to `rm -rf /*` if directory is empty.
  if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
    rm -rf "${directory:?}/"*
  fi

  local unpack_tmp
  unpack_tmp=$(mktemp -d)

  if [[ "$archive_type" == "zip" ]]; then
    ensure_dependencies unzip
    unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || {
      msg_error "Failed to extract ZIP archive"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    }
  elif [[ "$archive_type" == "tar" ]]; then
    tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
      msg_error "Failed to extract TAR archive"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    }
  fi

  # If the archive unpacks to a single top-level directory, deploy its
  # contents (strip the wrapper dir); otherwise deploy everything as-is.
  local top_entries
  top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)

  if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
    local inner_dir="$top_entries"
    shopt -s dotglob nullglob
    if compgen -G "$inner_dir/*" >/dev/null; then
      cp -r "$inner_dir"/* "$directory/" || {
        msg_error "Failed to copy contents from $inner_dir to $directory"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    else
      msg_error "Inner directory is empty: $inner_dir"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    fi
    shopt -u dotglob nullglob
  else
    shopt -s dotglob nullglob
    if compgen -G "$unpack_tmp/*" >/dev/null; then
      cp -r "$unpack_tmp"/* "$directory/" || {
        msg_error "Failed to copy contents to $directory"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    else
      msg_error "Unpacked archive is empty"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    fi
    shopt -u dotglob nullglob
  fi

  rm -rf "$tmpdir" "$unpack_tmp"
  msg_ok "Successfully deployed archive to $directory"
  return 0
}
"$unpack_tmp" + msg_ok "Successfully deployed archive to $directory" + return 0 +} + +setup_nonfree() { + local sources_file="/etc/apt/sources.list.d/debian-nonfree.sources" + + if [ ! -f "$sources_file" ]; then + cat <$sources_file +Types: deb +URIs: http://deb.debian.org/debian +Suites: trixie trixie-updates +Components: main contrib non-free non-free-firmware + +Types: deb +URIs: http://security.debian.org/debian-security +Suites: trixie-security +Components: main contrib non-free non-free-firmware +EOF + fi + $STD apt update + return 0 +}