Compare commits

..

1 Commits

Author SHA1 Message Date
push-app-to-main[bot]
10b9919f8a Add shlink (ct) 2026-05-10 20:17:29 +00:00
20 changed files with 265 additions and 1379 deletions

View File

@@ -461,29 +461,6 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-05-11
### 🆕 New Scripts
- Lychee ([#14424](https://github.com/community-scripts/ProxmoxVE/pull/14424))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Termix: fix nginx pid path and log paths on update [@MickLesk](https://github.com/MickLesk) ([#14419](https://github.com/community-scripts/ProxmoxVE/pull/14419))
- Nginxproxymanager: restore NPM nginx.conf after OpenResty rebuild [@MickLesk](https://github.com/MickLesk) ([#14421](https://github.com/community-scripts/ProxmoxVE/pull/14421))
- #### 🔧 Refactor
- InvestBrain: add commented reverse proxy config hints to .env [@MickLesk](https://github.com/MickLesk) ([#14422](https://github.com/community-scripts/ProxmoxVE/pull/14422))
### 🧰 Tools
- #### 🐞 Bug Fixes
- Cronmaster: fix unexpected EOF in update_cronmaster script [@MickLesk](https://github.com/MickLesk) ([#14420](https://github.com/community-scripts/ProxmoxVE/pull/14420))
## 2026-05-10
### 🚀 Updated Scripts
@@ -494,10 +471,6 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
- karakeep: Fix SERVER_VERSION update [@MickLesk](https://github.com/MickLesk) ([#14378](https://github.com/community-scripts/ProxmoxVE/pull/14378))
- inspIRCd: Fix service not autostarting [@tremor021](https://github.com/tremor021) ([#14368](https://github.com/community-scripts/ProxmoxVE/pull/14368))
- #### 🔧 Refactor
- refactor: webcheck [@CrazyWolf13](https://github.com/CrazyWolf13) ([#14391](https://github.com/community-scripts/ProxmoxVE/pull/14391))
### 💾 Core
- #### 🐞 Bug Fixes

View File

@@ -1,6 +0,0 @@
__ __
/ / __ _______/ /_ ___ ___
/ / / / / / ___/ __ \/ _ \/ _ \
/ /___/ /_/ / /__/ / / / __/ __/
/_____/\__, /\___/_/ /_/\___/\___/
/____/

View File

@@ -1,6 +0,0 @@
_____ ___ _________
/ ___/____ / (_)___/ /_ __(_)___ ___ ___
\__ \/ __ \/ / / __ / / / / / __ `__ \/ _ \
___/ / /_/ / / / /_/ / / / / / / / / / / __/
/____/\____/_/_/\__,_/ /_/ /_/_/ /_/ /_/\___/

View File

@@ -1,73 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/LycheeOrg/Lychee
# Proxmox VE Helper-Scripts container script for the Lychee photo gallery.
# Sources build.func (above), which supplies header_info, variables, color,
# catch_errors, check_for_gh_release, fetch_and_deploy_gh_release, msg_*,
# start, build_container and description used below.
APP="Lychee"
# Default LXC resources; each var_* may be pre-set in the environment to override.
var_tags="${var_tags:-media;photos;gallery}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place upgrade hook, invoked by the framework when the user re-runs the
# script inside an existing container. Preserves .env and the storage tree
# across a clean redeploy of the latest GitHub prebuild release.
function update_script() {
header_info
check_container_storage
check_container_resources
# Bail out if this container was never provisioned with Lychee.
if [[ ! -d /opt/lychee ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
# check_for_gh_release returns success only when a newer release exists.
if check_for_gh_release "lychee" "LycheeOrg/Lychee"; then
msg_info "Stopping Services"
systemctl stop caddy
msg_ok "Stopped Services"
# Back up user data outside /opt/lychee so CLEAN_INSTALL can wipe it.
msg_info "Backing up Data"
cp /opt/lychee/.env /opt/lychee.env.bak
cp -r /opt/lychee/storage /opt/lychee_storage_backup
msg_ok "Backed up Data"
# CLEAN_INSTALL=1: remove the old tree before unpacking the new release zip.
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "lychee" "LycheeOrg/Lychee" "prebuild" "latest" "/opt/lychee" "Lychee.zip"
msg_info "Restoring Data"
cp /opt/lychee.env.bak /opt/lychee/.env
rm -f /opt/lychee.env.bak
# Copy backup contents over the fresh storage dir (keeps new framework dirs).
cp -r /opt/lychee_storage_backup/. /opt/lychee/storage
rm -rf /opt/lychee_storage_backup
msg_ok "Restored Data"
msg_info "Updating Application"
cd /opt/lychee
# $STD (set by build.func) silences output unless verbose mode is enabled.
$STD php artisan migrate --force
$STD php artisan optimize:clear
# Laravel needs group-write on storage/ and bootstrap/cache for the web user.
chmod -R 775 /opt/lychee/storage /opt/lychee/bootstrap/cache
msg_ok "Updated Application"
msg_info "Starting Services"
systemctl start caddy
msg_ok "Started Services"
msg_ok "Updated successfully!"
fi
exit
}
# Fresh-install flow: prompt, create the LXC and run the install script inside it.
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

View File

@@ -92,11 +92,6 @@ ExecStart=/usr/local/openresty/nginx/sbin/nginx -g 'daemon off;'
[Install]
WantedBy=multi-user.target
EOF
if [ -f /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf ]; then
cp /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf /usr/local/openresty/nginx/conf/nginx.conf
sed -i 's+^daemon+#daemon+g' /usr/local/openresty/nginx/conf/nginx.conf
sed -i 's+include conf.d+include /etc/nginx/conf.d+g' /usr/local/openresty/nginx/conf/nginx.conf
fi
sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
systemctl daemon-reload
systemctl unmask openresty 2>/dev/null || true

View File

@@ -1,77 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.solidtime.io/
# Proxmox VE Helper-Scripts container script for the SolidTime time tracker.
# build.func (sourced above) provides all helper functions used below
# (header_info, variables, color, catch_errors, msg_*, start, build_container...).
APP="SolidTime"
# Default LXC resources; each var_* may be pre-set in the environment to override.
var_tags="${var_tags:-time-tracking;productivity;business}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place upgrade hook, invoked by the framework when the script is re-run
# inside an existing container. Preserves .env and storage/ across a clean
# redeploy of the latest source tarball, then rebuilds the application.
function update_script() {
header_info
check_container_storage
check_container_resources
# Abort if SolidTime was never installed in this container.
if [[ ! -d /opt/solidtime ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
# Proceed only when a newer GitHub release is available.
if check_for_gh_release "solidtime" "solidtime-io/solidtime"; then
msg_info "Stopping Services"
systemctl stop caddy
msg_ok "Stopped Services"
# Back up user data outside /opt/solidtime so CLEAN_INSTALL can wipe it.
msg_info "Backing up Data"
cp /opt/solidtime/.env /opt/solidtime.env.bak
cp -r /opt/solidtime/storage /opt/solidtime_storage_backup
msg_ok "Backed up Data"
# CLEAN_INSTALL=1: remove the old tree before unpacking the release tarball.
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "solidtime" "solidtime-io/solidtime" "tarball"
msg_info "Restoring Data"
cp /opt/solidtime.env.bak /opt/solidtime/.env
rm -f /opt/solidtime.env.bak
# Copy backup contents over the fresh storage dir (keeps new framework dirs).
cp -r /opt/solidtime_storage_backup/. /opt/solidtime/storage
rm -rf /opt/solidtime_storage_backup
msg_ok "Restored Data"
msg_info "Updating Application"
cd /opt/solidtime
# Rebuild PHP deps and frontend assets, then run DB migrations.
# $STD (set by build.func) silences output unless verbose mode is enabled.
$STD composer install --no-dev --optimize-autoloader
$STD npm install
$STD npm run build
$STD php artisan migrate --force
$STD php artisan optimize:clear
chown -R www-data:www-data /opt/solidtime
msg_ok "Updated Application"
msg_info "Starting Services"
systemctl start caddy
msg_ok "Started Services"
msg_ok "Updated successfully!"
fi
exit
}
# Fresh-install flow: prompt, create the LXC and run the install script inside it.
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
echo -e "${INFO}${YW}HTTPS is not enabled by default (use domain + reverse proxy/TLS if needed).${CL}"

View File

@@ -199,9 +199,7 @@ EOF
cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak
curl -fsSL "https://raw.githubusercontent.com/Termix-SSH/Termix/main/docker/nginx.conf" -o /etc/nginx/nginx.conf
sed -i '/^master_process/d' /etc/nginx/nginx.conf
sed -i 's|pid /tmp/nginx/nginx.pid;|pid /run/nginx.pid;|' /etc/nginx/nginx.conf
sed -i 's|error_log /tmp/nginx/error.log|error_log /var/log/nginx/error.log|' /etc/nginx/nginx.conf
sed -i 's|access_log /tmp/nginx/access.log|access_log /var/log/nginx/access.log|' /etc/nginx/nginx.conf
sed -i '/^pid \/app\/nginx/d' /etc/nginx/nginx.conf
sed -i 's|/app/html|/opt/termix/html|g' /etc/nginx/nginx.conf
sed -i 's|/app/nginx|/opt/termix/nginx|g' /etc/nginx/nginx.conf
sed -i 's|listen ${PORT};|listen 80;|g' /etc/nginx/nginx.conf

View File

@@ -28,7 +28,7 @@ function update_script() {
exit
fi
if check_for_gh_release "web-check" "Lissy93/web-check"; then
if check_for_gh_release "web-check" "CrazyWolf13/web-check"; then
msg_info "Stopping Service"
systemctl stop web-check
msg_ok "Stopped Service"
@@ -38,7 +38,7 @@ function update_script() {
msg_ok "Created backup"
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "web-check" "Lissy93/web-check" "tarball"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "web-check" "CrazyWolf13/web-check" "tarball"
msg_info "Restoring backup"
mv /opt/.env /opt/web-check

View File

@@ -90,11 +90,6 @@ MAIL_PORT=2525
MAIL_FROM_ADDRESS="investbrain@${LOCAL_IP}"
VITE_APP_NAME=Investbrain
# Reverse Proxy Support (uncomment and set APP_URL/ASSET_URL to your domain when using a reverse proxy)
# APP_URL=https://your-domain.com
# ASSET_URL=https://your-domain.com
# TRUSTED_PROXIES=*
EOF
export COMPOSER_ALLOW_SUPERUSER=1
$STD /usr/local/bin/composer install --no-interaction --no-dev --optimize-autoloader

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/LycheeOrg/Lychee
# Runs INSIDE the freshly created LXC. FUNCTIONS_FILE_PATH (injected by the
# host-side build script) provides msg_*, $STD, setup_php, setup_postgresql,
# setup_postgresql_db, setup_ffmpeg, setup_imagemagick,
# fetch_and_deploy_gh_release, motd_ssh, customize and cleanup_lxc.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
caddy \
libimage-exiftool-perl \
jpegoptim
msg_ok "Installed Dependencies"
# PHP 8.4 with FPM plus the extensions Lychee requires; Postgres 16 backend.
# setup_postgresql_db exports PG_DB_PASS for use in the .env below.
PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,ldap,exif,gd,intl,imagick,redis,zip,pdo_pgsql,pcntl" setup_php
PG_VERSION="16" setup_postgresql
PG_DB_NAME="lychee" PG_DB_USER="lychee" setup_postgresql_db
setup_ffmpeg
setup_imagemagick
# Deploy the prebuilt release zip into /opt/lychee.
fetch_and_deploy_gh_release "lychee" "LycheeOrg/Lychee" "prebuild" "latest" "/opt/lychee" "Lychee.zip"
msg_info "Configuring Application"
cd /opt/lychee
cp .env.example .env
# Generate the Laravel app key and write production settings into .env.
APP_KEY=$($STD php artisan key:generate --show)
sed -i "s|^APP_KEY=.*|APP_KEY=${APP_KEY}|" .env
sed -i "s|^APP_ENV=.*|APP_ENV=production|" .env
sed -i "s|^APP_DEBUG=.*|APP_DEBUG=false|" .env
sed -i "s|^APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env
sed -i "s|^DB_CONNECTION=.*|DB_CONNECTION=pgsql|" .env
sed -i "s|^DB_HOST=.*|DB_HOST=127.0.0.1|" .env
sed -i "s|^DB_PORT=.*|DB_PORT=5432|" .env
# DB_DATABASE may be commented out in .env.example, hence the optional '#'.
sed -i "s|^#\?DB_DATABASE=.*|DB_DATABASE=${PG_DB_NAME}|" .env
sed -i "s|^DB_USERNAME=.*|DB_USERNAME=${PG_DB_USER}|" .env
sed -i "s|^DB_PASSWORD=.*|DB_PASSWORD=${PG_DB_PASS}|" .env
# Create the writable runtime directories the prebuild does not ship.
mkdir -p storage/framework/{cache,sessions,views} storage/logs bootstrap/cache public/dist public/uploads public/sym
touch public/dist/user.css public/dist/custom.js
chmod -R 775 storage bootstrap/cache public/dist public/uploads public/sym
msg_ok "Configured Application"
msg_info "Running Database Migrations"
cd /opt/lychee
$STD php artisan migrate --force
msg_ok "Ran Database Migrations"
chown -R www-data:www-data /opt/lychee
msg_info "Configuring Caddy"
# Derive "8.4"-style version to address the matching PHP-FPM socket/unit.
PHP_VER=$(php -r 'echo PHP_MAJOR_VERSION . "." . PHP_MINOR_VERSION;')
cat <<EOF >/etc/caddy/Caddyfile
:80 {
root * /opt/lychee/public
php_fastcgi unix//run/php/php${PHP_VER}-fpm.sock
file_server
encode gzip
}
EOF
# Let the caddy user read www-data-owned files it serves.
usermod -aG www-data caddy
msg_ok "Configured Caddy"
systemctl enable -q --now php${PHP_VER}-fpm
systemctl restart caddy
motd_ssh
customize
cleanup_lxc

View File

@@ -1,86 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.solidtime.io/
# Runs INSIDE the freshly created LXC. FUNCTIONS_FILE_PATH (injected by the
# host-side build script) provides msg_*, $STD, setup_php, setup_composer,
# setup_nodejs, setup_postgresql, setup_postgresql_db,
# fetch_and_deploy_gh_release, motd_ssh, customize and cleanup_lxc.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y caddy
msg_ok "Installed Dependencies"
# NOTE(review): uses PHP_MODULES= while the Lychee installer passes PHP_MODULE=
# to setup_php — confirm which variable name setup_php actually reads.
PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULES="bcmath,gd,intl,xml,zip,pdo_pgsql,redis,mbstring,curl" setup_php
setup_composer
NODE_VERSION="22" setup_nodejs
PG_VERSION="16" setup_postgresql
# Exports PG_DB_PASS for the .env edits below.
PG_DB_NAME="solidtime" PG_DB_USER="solidtime" setup_postgresql_db
fetch_and_deploy_gh_release "solidtime" "solidtime-io/solidtime" "tarball"
msg_info "Setting up SolidTime"
cd /opt/solidtime
cp .env.example .env
# Write production settings; plain HTTP on the container IP, no TLS.
sed -i "s|^APP_ENV=.*|APP_ENV=production|" .env
sed -i "s|^APP_DEBUG=.*|APP_DEBUG=false|" .env
sed -i "s|^APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env
sed -i "s|^APP_ENABLE_REGISTRATION=.*|APP_ENABLE_REGISTRATION=true|" .env
sed -i "s|^DB_CONNECTION=.*|DB_CONNECTION=pgsql|" .env
sed -i "s|^DB_HOST=.*|DB_HOST=127.0.0.1|" .env
sed -i "s|^DB_PORT=.*|DB_PORT=5432|" .env
sed -i "s|^DB_DATABASE=.*|DB_DATABASE=${PG_DB_NAME}|" .env
sed -i "s|^DB_USERNAME=.*|DB_USERNAME=${PG_DB_USER}|" .env
sed -i "s|^DB_PASSWORD=.*|DB_PASSWORD=${PG_DB_PASS}|" .env
sed -i "s|^FILESYSTEM_DISK=.*|FILESYSTEM_DISK=local|" .env
sed -i "s|^PUBLIC_FILESYSTEM_DISK=.*|PUBLIC_FILESYSTEM_DISK=public|" .env
sed -i "s|^MAIL_MAILER=.*|MAIL_MAILER=log|" .env
# Edit-or-append pattern: sed only rewrites an existing key, so add it when missing.
sed -i "s|^SESSION_SECURE_COOKIE=.*|SESSION_SECURE_COOKIE=false|" .env
grep -q "^SESSION_SECURE_COOKIE=" .env || echo "SESSION_SECURE_COOKIE=false" >>.env
sed -i "s|^APP_FORCE_HTTPS=.*|APP_FORCE_HTTPS=false|" .env
grep -q "^APP_FORCE_HTTPS=" .env || echo "APP_FORCE_HTTPS=false" >>.env
$STD composer install --no-dev --optimize-autoloader
# Emits KEY=VALUE lines (app/passport keys); stderr discarded intentionally.
php artisan self-host:generate-keys >/tmp/solidtime.keys 2>/dev/null
# Merge generated keys into .env: drop any prior value, then append the new one.
while IFS= read -r line; do
KEY="${line%%=*}"
[[ -z "$KEY" || "${KEY:0:1}" == "#" ]] && continue
sed -i "/^${KEY}=/d" .env
echo "$line" >>.env
done </tmp/solidtime.keys
rm -f /tmp/solidtime.keys
$STD npm install
$STD npm run build
# Frontend is prebuilt into public/; node_modules not needed at runtime.
rm -rf node_modules
mkdir -p storage/framework/{cache,sessions,views} storage/logs bootstrap/cache
chown -R www-data:www-data /opt/solidtime
chmod -R 775 storage bootstrap/cache
$STD php artisan storage:link
$STD php artisan migrate --force
# Create the Passport personal-access client non-interactively.
$STD php artisan passport:client --personal --name="API" -n
$STD php artisan optimize:clear
msg_ok "Set up SolidTime"
msg_info "Configuring Caddy"
# Derive "8.3"-style version to address the matching PHP-FPM socket/unit.
PHP_VER=$(php -r 'echo PHP_MAJOR_VERSION . "." . PHP_MINOR_VERSION;')
cat <<EOF >/etc/caddy/Caddyfile
:80 {
root * /opt/solidtime/public
php_fastcgi unix//run/php/php${PHP_VER}-fpm.sock
file_server
encode gzip
}
EOF
# Let the caddy user read www-data-owned files it serves.
usermod -aG www-data caddy
systemctl enable -q --now php${PHP_VER}-fpm
systemctl restart caddy
msg_ok "Configured Caddy"
motd_ssh
customize
cleanup_lxc

View File

@@ -18,10 +18,13 @@ export DEBIAN_FRONTEND=noninteractive
$STD apt -y install --no-install-recommends \
git \
traceroute \
build-essential \
make \
g++ \
traceroute \
xvfb \
dbus \
xorg \
xvfb \
gtk2-engines-pixbuf \
dbus-x11 \
xfonts-base \
@@ -40,13 +43,16 @@ rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
msg_ok "Setup Python3"
msg_info "Installing Chromium"
setup_deb822_repo \
"google-chrome" \
"https://dl-ssl.google.com/linux/linux_signing_key.pub" \
"http://dl.google.com/linux/chrome/deb/" \
"stable" \
"main" \
"amd64"
curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | gpg --dearmor -o /usr/share/keyrings/google-chrome-keyring.gpg
cat <<EOF | sudo tee /etc/apt/sources.list.d/google-chrome.sources >/dev/null
Types: deb
URIs: http://dl.google.com/linux/chrome/deb/
Suites: stable
Components: main
Architectures: amd64
Signed-By: /usr/share/keyrings/google-chrome-keyring.gpg
EOF
$STD apt update
$STD apt -y install \
chromium \
libxss1 \
@@ -58,14 +64,13 @@ msg_info "Setting up Chromium"
chmod 755 /usr/bin/chromium
msg_ok "Setup Chromium"
fetch_and_deploy_gh_release "web-check" "Lissy93/web-check" "tarball"
fetch_and_deploy_gh_release "web-check" "CrazyWolf13/web-check" "tarball"
msg_info "Installing Web-Check (Patience)"
cd /opt/web-check
cat <<'EOF' >/opt/web-check/.env
CHROME_PATH=/usr/bin/chromium
PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium
PUPPETEER_SKIP_DOWNLOAD='true'
HEADLESS=true
GOOGLE_CLOUD_API_KEY=''
REACT_APP_SHODAN_API_KEY=''

View File

@@ -196,7 +196,7 @@ explain_exit_code() {
103) echo "Validation: Shell is not Bash" ;;
104) echo "Validation: Not running as root (or invoked via sudo)" ;;
105) echo "Validation: Proxmox VE version not supported" ;;
106) echo "Validation: Unsupported architecture (requires amd64 or arm64)" ;;
106) echo "Validation: Architecture not supported (ARM / PiMox)" ;;
107) echo "Validation: Kernel key parameters unreadable" ;;
108) echo "Validation: Kernel key limits exceeded" ;;
109) echo "Proxmox: No available container ID after max attempts" ;;

View File

@@ -52,11 +52,6 @@ variables() {
# as "/tmp/${NSAPP}-${CTID}-${SESSION_ID}.log" (requires CTID, not available here)
CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"
# ARM64 Template default variables
DEBIAN_DEFAULT_CODENAME="trixie"
UBUNTU_DEFAULT_CODENAME="noble"
ALPINE_DEFAULT_VERSION="3.23"
# Parse dev_mode early
parse_dev_mode
@@ -1067,7 +1062,6 @@ load_vars_file() {
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain
var_post_install
)
# Whitelist check helper
@@ -1285,7 +1279,6 @@ default_var_settings() {
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
var_post_install
)
# Snapshot: environment variables (highest precedence)
@@ -1381,11 +1374,6 @@ var_verbose=no
# GitHub Personal Access Token (optional avoids API rate limits during installs)
# Create at https://github.com/settings/tokens read-only public access is sufficient
# var_github_token=ghp_your_token_here
# Optional post-install script (host-side path to a *.sh on the Proxmox host)
# Runs ON THE HOST after the container is fully provisioned.
# Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG
# var_post_install=/opt/post-install/myhook.sh
EOF
# Now choose storages (always prompt unless just one exists)
@@ -1464,7 +1452,6 @@ if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain
var_post_install
)
fi
@@ -1677,7 +1664,6 @@ _build_current_app_vars_tmp() {
[ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
[ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
[ -n "${var_post_install:-}" ] && echo "var_post_install=$(_sanitize_value "${var_post_install}")"
} >"$tmpf"
echo "$tmpf"
@@ -1822,7 +1808,7 @@ advanced_settings() {
TAGS="community-script${var_tags:+;${var_tags}}"
fi
local STEP=1
local MAX_STEP=29
local MAX_STEP=28
# Store values for back navigation - inherit from var_* app defaults
local _ct_type="${var_unprivileged:-1}"
@@ -1856,7 +1842,6 @@ advanced_settings() {
local _enable_mknod="${var_mknod:-0}"
local _mount_fs="${var_mount_fs:-}"
local _protect_ct="${var_protection:-no}"
local _post_install="${var_post_install:-}"
# Detect host timezone for default (if not set via var_timezone)
local _host_timezone=""
@@ -1947,7 +1932,7 @@ advanced_settings() {
# ═══════════════════════════════════════════════════════════════════════════
# STEP 2: Root Password
# ═══════════════════════════════════════════════════════════════════════════
# ═══════════════════════════════════════════════════════════════════════════
2)
if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "ROOT PASSWORD" \
@@ -2714,61 +2699,9 @@ advanced_settings() {
;;
# ═══════════════════════════════════════════════════════════════════════════
# STEP 28: Optional host-side post-install hook (path on the Proxmox HOST)
# STEP 28: Verbose Mode & Confirmation
# ═══════════════════════════════════════════════════════════════════════════
28)
local _hook_prompt="Optional: absolute path to a *.sh file ON THE PROXMOX HOST.
It runs as root on the HOST (NOT in the LXC) after the container
is fully provisioned and started.
Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG.
Leave empty to skip."
while true; do
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "POST-INSTALL HOOK (HOST)" \
--ok-button "Next" --cancel-button "Back" \
--inputbox "$_hook_prompt" 16 70 "${_post_install}" \
3>&1 1>&2 2>&3); then
# Normalize: strip surrounding whitespace
result="$(printf '%s' "$result" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
if [[ -z "$result" ]]; then
_post_install=""
((STEP++))
break
fi
# Reject obvious shell-meta sneaking through
if [[ "$result" == *';'* || "$result" == *'$('* || "$result" == *'`'* || "$result" == *'&&'* || "$result" == *'||'* ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \
--msgbox "Path contains shell metacharacters. Please provide a plain absolute file path." 10 70
continue
fi
if [[ "$result" != /* ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \
--msgbox "Path must be absolute (start with /).\n\nGot: $result" 10 70
continue
fi
if [[ ! -f "$result" ]]; then
if ! whiptail --backtitle "Proxmox VE Helper Scripts" --title "FILE NOT FOUND" \
--yesno "File does not exist on host:\n\n$result\n\nKeep this path anyway?" 12 70; then
continue
fi
fi
_post_install="$result"
((STEP++))
break
else
((STEP--))
break
fi
done
;;
# ═══════════════════════════════════════════════════════════════════════════
# STEP 29: Verbose Mode & Confirmation
# ═══════════════════════════════════════════════════════════════════════════
29)
local verbose_default_flag="--defaultno"
[[ "$_verbose" == "yes" ]] && verbose_default_flag=""
@@ -2797,11 +2730,6 @@ Leave empty to skip."
local apt_display="${_apt_cacher:-no}"
[[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip"
local post_install_display="${_post_install:-(none)}"
local post_install_warn=""
[[ -n "$_post_install" ]] && post_install_warn="
⚠ Hook runs as root on Proxmox HOST (not in LXC)"
local summary="Container Type: $ct_type_desc
Container ID: $_ct_id
Hostname: $_hostname
@@ -2825,8 +2753,7 @@ Features:
Advanced:
Timezone: $tz_display
APT Cacher: $apt_display
Verbose: $_verbose
Post-Install Script: ${post_install_display}${post_install_warn}"
Verbose: $_verbose"
if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "CONFIRM SETTINGS" \
@@ -2869,7 +2796,6 @@ Advanced:
APT_CACHER="$_apt_cacher"
APT_CACHER_IP="$_apt_cacher_ip"
VERBOSE="$_verbose"
var_post_install="$_post_install"
# Update var_* based on user choice (for functions that check these)
var_gpu="$_enable_gpu"
@@ -3057,9 +2983,6 @@ echo_default() {
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
if [[ "$(dpkg --print-architecture)" == "arm64" ]]; then
echo -e "${INFO}${BOLD}${DGN}Architecture: ${BGN}arm64${CL}"
fi
if [[ -n "${var_gpu:-}" && "${var_gpu}" == "yes" ]]; then
echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}Enabled${CL}"
fi
@@ -3096,9 +3019,7 @@ install_script() {
pve_check
shell_check
root_check
ensure_whiptail
arch_check
arm64_notice
ssh_check
maxkeys_check
diagnostics_check
@@ -4361,23 +4282,19 @@ EOF
msg_warn "Skipping timezone setup zone '$tz' not found in container"
fi
local _base_pkgs="sudo curl mc gnupg2 jq"
if [[ "${ARCH:-amd64}" == "arm64" ]]; then
_base_pkgs+=" openssh-server wget gcc"
fi
# Detect broken DNS resolver (e.g. Tailscale MagicDNS) and inject public DNS
if ! pct exec "$CTID" -- bash -c "getent hosts deb.debian.org >/dev/null 2>&1 && getent hosts archive.ubuntu.com >/dev/null 2>&1"; then
msg_warn "APT repository DNS resolution failed in container, injecting public DNS servers"
pct exec "$CTID" -- bash -c "echo -e 'nameserver 8.8.8.8\nnameserver 1.1.1.1' >/etc/resolv.conf"
fi
pct exec "$CTID" -- bash -c "apt-get update 2>&1 && apt-get install -y ${_base_pkgs} 2>&1" >>"$BUILD_LOG" 2>&1 || {
pct exec "$CTID" -- bash -c "apt-get update 2>&1 && apt-get install -y sudo curl mc gnupg2 jq 2>&1" >>"$BUILD_LOG" 2>&1 || {
local failed_mirror
failed_mirror=$(pct exec "$CTID" -- bash -c "grep -m1 -oP '(?<=URIs: https?://)[^/]+' /etc/apt/sources.list.d/debian.sources 2>/dev/null || grep -m1 -oP '(?<=deb https?://)[^/]+' /etc/apt/sources.list 2>/dev/null" 2>/dev/null || echo "unknown")
msg_warn "apt-get update failed (${failed_mirror}), trying alternate mirrors..."
local mirror_exit=0
pct exec "$CTID" -- env APT_BASE="$_base_pkgs" bash -c '
pct exec "$CTID" -- bash -c '
APT_BASE="sudo curl mc gnupg2 jq"
DISTRO=$(. /etc/os-release 2>/dev/null && echo "$ID" || echo "debian")
if [ "$DISTRO" = "ubuntu" ]; then
@@ -4487,7 +4404,7 @@ EOF
[ -f \"\$src\" ] && sed -i \"s|URIs: http[s]*://[^/]*/|URIs: http://${custom_mirror}/|g; s|deb http[s]*://[^/]*/|deb http://${custom_mirror}/|g\" \"\$src\"
done
rm -rf /var/lib/apt/lists/*
apt-get update >/dev/null 2>&1 && apt-get install -y ${_base_pkgs} >/dev/null 2>&1
apt-get update >/dev/null 2>&1 && apt-get install -y sudo curl mc gnupg2 jq >/dev/null 2>&1
" && break
msg_warn "Mirror '${custom_mirror}' also failed. Try another or type 'skip'."
done
@@ -4527,9 +4444,7 @@ EOF
# that sends "configuring" status AFTER the host already reported "failed"
export CONTAINER_INSTALLING=true
local _install_script
_install_script="$(curl -fsSL "https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh")"
lxc-attach -n "$CTID" -- bash -c "$_install_script"
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)"
local lxc_exit=$?
unset CONTAINER_INSTALLING
@@ -4924,9 +4839,7 @@ EOF
# Re-run install script in existing container (don't destroy/recreate)
set +Eeuo pipefail
trap - ERR
local _install_script
_install_script="$(curl -fsSL "https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh")"
lxc-attach -n "$CTID" -- bash -c "$_install_script"
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)"
local apt_retry_exit=$?
set -Eeuo pipefail
trap 'error_handler' ERR
@@ -5684,72 +5597,6 @@ create_lxc_container() {
esac
}
ARCH="$(dpkg --print-architecture)"
# Maps OS type + version to the release variant name used by ARM64 template sources.
arm64_template_variant() {
case "$1:$2" in
debian:12) echo "bookworm" ;;
debian:13) echo "trixie" ;;
debian:) echo "$DEBIAN_DEFAULT_CODENAME" ;;
ubuntu:24.04) echo "noble" ;;
ubuntu:26.04) echo "questing" ;;
ubuntu:) echo "$UBUNTU_DEFAULT_CODENAME" ;;
alpine:*) echo "${2:-$ALPINE_DEFAULT_VERSION}" ;;
*) return 1 ;;
esac
}
# Downloads an ARM64 LXC rootfs template to $1.
# Debian: fetches latest release from community-scripts/debian-arm64-lxc on GitHub.
# Others: fetches from jenkins.linuxcontainers.org.
download_arm64_template() {
local dest="$1" url
mkdir -p "$(dirname "$dest")" || {
msg_error "Cannot create template dir."
exit 207
}
if [[ "$PCT_OSTYPE" == "debian" ]]; then
url=$(
curl -fsSL "https://api.github.com/repos/community-scripts/debian-arm64-lxc/releases/latest" |
jq -r --arg v "$CUSTOM_TEMPLATE_VARIANT" \
'.assets[].browser_download_url | select(test("debian-" + $v + "-arm64-rootfs\\.tar\\.xz$"))' |
head -n1
)
[[ -n "$url" ]] || {
msg_error "Could not find Debian ${CUSTOM_TEMPLATE_VARIANT} ARM64 template URL."
exit 207
}
else
url="https://jenkins.linuxcontainers.org/job/image-${PCT_OSTYPE}/architecture=arm64,release=${CUSTOM_TEMPLATE_VARIANT},variant=default/lastStableBuild/artifact/rootfs.tar.xz"
fi
msg_info "Downloading ${PCT_OSTYPE^} ${CUSTOM_TEMPLATE_VARIANT} ARM64 template"
if ! curl -fsSL -o "$dest" "$url"; then
msg_error "Failed to download ARM64 template from: $url"
exit 208
fi
msg_ok "Downloaded ARM64 LXC template"
}
download_template() {
local dest="${1:-$TEMPLATE_PATH}"
if [[ "$ARCH" == "arm64" ]]; then
download_arm64_template "$dest"
else
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || {
msg_error "Failed to download template '$TEMPLATE' to storage '$TEMPLATE_STORAGE'"
exit 222
}
fi
}
# ------------------------------------------------------------------------------
# Required input variables
# ------------------------------------------------------------------------------
@@ -5916,120 +5763,153 @@ create_lxc_container() {
# ------------------------------------------------------------------------------
# Template discovery & validation
# ------------------------------------------------------------------------------
CUSTOM_TEMPLATE_VARIANT=""
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
case "$PCT_OSTYPE" in
debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
*) TEMPLATE_PATTERN="" ;;
esac
if [[ "$ARCH" == "arm64" ]]; then
# ARM64: use custom template download from linuxcontainers.org / GitHub
msg_info "Preparing ARM64 template"
msg_info "Searching for template '$TEMPLATE_SEARCH'"
CUSTOM_TEMPLATE_VARIANT=$(arm64_template_variant "$PCT_OSTYPE" "${PCT_OSVERSION:-}") || {
msg_error "No ARM64 template mapping for ${PCT_OSTYPE} ${PCT_OSVERSION:-latest}"
exit 207
}
# Initialize variables
ONLINE_TEMPLATE=""
ONLINE_TEMPLATES=()
TEMPLATE="${PCT_OSTYPE}-${CUSTOM_TEMPLATE_VARIANT}-rootfs.tar.xz"
TEMPLATE_SOURCE="custom-arm64"
# Resolve template path
TEMPLATE_PATH="$(pvesm path "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" 2>/dev/null || true)"
if [[ -z "$TEMPLATE_PATH" ]]; then
local _tpl_base
_tpl_base=$(awk -v s="$TEMPLATE_STORAGE" '
$0 ~ "^[^:]+:[[:space:]]*" s "$" {f=1; next}
f && /^[^[:space:]]/ {f=0}
f && $1 == "path" {print $2; exit}
' /etc/pve/storage.cfg)
TEMPLATE_PATH="${_tpl_base:-/var/lib/vz}/template/cache/$TEMPLATE"
fi
# Download if missing, too small, or corrupt
if [[ ! -f "$TEMPLATE_PATH" ]]; then
download_arm64_template "$TEMPLATE_PATH"
elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]] || ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
msg_warn "Local template invalid - re-downloading."
rm -f "$TEMPLATE_PATH"
download_arm64_template "$TEMPLATE_PATH"
else
msg_ok "Template ${BL}$TEMPLATE${CL} found locally."
fi
# Step 1: Check local templates first (instant)
mapfile -t LOCAL_TEMPLATES < <(
pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
awk -v search="${TEMPLATE_SEARCH}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
sed 's|.*/||' | sort -t - -k 2 -V
)
# Step 2: If local template found, use it immediately (skip pveam update)
if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${LOCAL_TEMPLATES[-1]}"
TEMPLATE_SOURCE="local"
msg_ok "Template search completed"
else
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
case "$PCT_OSTYPE" in
debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
*) TEMPLATE_PATTERN="" ;;
esac
# Step 3: No local template - need to check online (this may be slow)
msg_info "No local template found, checking online catalog..."
msg_info "Searching for template '$TEMPLATE_SEARCH'"
# Update catalog with timeout to prevent long hangs
if command -v timeout &>/dev/null; then
if ! timeout 30 pveam update >/dev/null 2>&1; then
msg_warn "Template catalog update timed out (possible network/DNS issue). Run 'pveam update' manually to diagnose."
fi
else
pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)"
fi
# Initialize variables
ONLINE_TEMPLATE=""
ONLINE_TEMPLATES=()
mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "^${TEMPLATE_SEARCH}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
# Step 1: Check local templates first (instant)
mapfile -t LOCAL_TEMPLATES < <(
pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
awk -v search="${TEMPLATE_SEARCH}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
sed 's|.*/||' | sort -t - -k 2 -V
TEMPLATE="$ONLINE_TEMPLATE"
TEMPLATE_SOURCE="online"
msg_ok "Template search completed"
fi
# If still no template, try to find alternatives
if [[ -z "$TEMPLATE" ]]; then
msg_warn "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
# Get all available versions for this OS type
AVAILABLE_VERSIONS=()
mapfile -t AVAILABLE_VERSIONS < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
awk -F'\t' '{print $1}' |
grep "^${PCT_OSTYPE}-" |
sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
sort -u -V 2>/dev/null
)
# Step 2: If local template found, use it immediately (skip pveam update)
if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${LOCAL_TEMPLATES[-1]}"
TEMPLATE_SOURCE="local"
msg_ok "Template search completed"
else
# Step 3: No local template - need to check online (this may be slow)
msg_info "No local template found, checking online catalog..."
if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
echo ""
echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
for i in "${!AVAILABLE_VERSIONS[@]}"; do
echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
done
echo ""
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice </dev/tty
# Update catalog with timeout to prevent long hangs
if command -v timeout &>/dev/null; then
if ! timeout 30 pveam update >/dev/null 2>&1; then
msg_warn "Template catalog update timed out (possible network/DNS issue). Run 'pveam update' manually to diagnose."
if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
ONLINE_TEMPLATES=()
mapfile -t ONLINE_TEMPLATES < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
awk '{print $2}' |
grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" |
sort -t - -k 2 -V 2>/dev/null || true
)
if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${ONLINE_TEMPLATES[-1]}"
TEMPLATE_SOURCE="online"
else
msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
exit 225
fi
else
pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)"
msg_custom "🚫" "${YW}" "Installation cancelled"
exit 0
fi
ONLINE_TEMPLATES=()
mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "^${TEMPLATE_SEARCH}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
TEMPLATE="$ONLINE_TEMPLATE"
TEMPLATE_SOURCE="online"
msg_ok "Template search completed"
else
msg_error "No ${PCT_OSTYPE} templates available at all"
exit 225
fi
fi
# If still no template, try to find alternatives
TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
if [[ -z "$TEMPLATE_PATH" ]]; then
TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
[[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
fi
# If we still don't have a path but have a valid template name, construct it
if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
fi
[[ -n "$TEMPLATE_PATH" ]] || {
if [[ -z "$TEMPLATE" ]]; then
msg_warn "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
# Get all available versions for this OS type
AVAILABLE_VERSIONS=()
# Get available versions
mapfile -t AVAILABLE_VERSIONS < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
awk -F'\t' '{print $1}' |
grep "^${PCT_OSTYPE}-" |
sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
sort -u -V 2>/dev/null
sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
grep -E '^[0-9]+\.[0-9]+$' |
sort -u -V 2>/dev/null || sort -u
)
if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
echo ""
echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
echo -e "\n${BL}Available versions:${CL}"
for i in "${!AVAILABLE_VERSIONS[@]}"; do
echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
done
echo ""
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice </dev/tty
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice </dev/tty
if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
export PCT_OSVERSION="$var_version"
msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
ONLINE_TEMPLATES=()
# Retry template search with new version
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
mapfile -t LOCAL_TEMPLATES < <(
pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
awk -v search="${TEMPLATE_SEARCH}-" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
sed 's|.*/||' | sort -t - -k 2 -V
)
mapfile -t ONLINE_TEMPLATES < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
@@ -6037,181 +5917,109 @@ create_lxc_container() {
grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" |
sort -t - -k 2 -V 2>/dev/null || true
)
ONLINE_TEMPLATE=""
[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${ONLINE_TEMPLATES[-1]}"
TEMPLATE_SOURCE="online"
if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${LOCAL_TEMPLATES[-1]}"
TEMPLATE_SOURCE="local"
else
msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
exit 225
TEMPLATE="$ONLINE_TEMPLATE"
TEMPLATE_SOURCE="online"
fi
TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
if [[ -z "$TEMPLATE_PATH" ]]; then
TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
[[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
fi
# If we still don't have a path but have a valid template name, construct it
if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
fi
[[ -n "$TEMPLATE_PATH" ]] || {
msg_error "Template still not found after version change"
exit 220
}
else
msg_custom "🚫" "${YW}" "Installation cancelled"
exit 0
fi
else
msg_error "No ${PCT_OSTYPE} templates available"
exit 225
exit 220
fi
fi
}
TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
if [[ -z "$TEMPLATE_PATH" ]]; then
TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
[[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
fi
# Validate that we found a template
if [[ -z "$TEMPLATE" ]]; then
msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
msg_custom "" "${YW}" "Please check:"
msg_custom " •" "${YW}" "Is pveam catalog available? (run: pveam available -section system)"
msg_custom " •" "${YW}" "Does the template exist for your OS version?"
exit 225
fi
# If we still don't have a path but have a valid template name, construct it
if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
fi
msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
[[ -n "$TEMPLATE_PATH" ]] || {
if [[ -z "$TEMPLATE" ]]; then
msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
# Get available versions
mapfile -t AVAILABLE_VERSIONS < <(
pveam available -section system 2>/dev/null |
grep "^${PCT_OSTYPE}-" |
sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
grep -E '^[0-9]+\.[0-9]+$' |
sort -u -V 2>/dev/null || sort -u
)
if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
echo -e "\n${BL}Available versions:${CL}"
for i in "${!AVAILABLE_VERSIONS[@]}"; do
echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
done
echo ""
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to exit: " choice </dev/tty
if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
export PCT_OSVERSION="$var_version"
msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
# Retry template search with new version
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
mapfile -t LOCAL_TEMPLATES < <(
pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
awk -v search="${TEMPLATE_SEARCH}-" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
sed 's|.*/||' | sort -t - -k 2 -V
)
mapfile -t ONLINE_TEMPLATES < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
awk '{print $2}' |
grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" |
sort -t - -k 2 -V 2>/dev/null || true
)
ONLINE_TEMPLATE=""
[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${LOCAL_TEMPLATES[-1]}"
TEMPLATE_SOURCE="local"
else
TEMPLATE="$ONLINE_TEMPLATE"
TEMPLATE_SOURCE="online"
fi
TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
if [[ -z "$TEMPLATE_PATH" ]]; then
TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
[[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
fi
# If we still don't have a path but have a valid template name, construct it
if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
fi
[[ -n "$TEMPLATE_PATH" ]] || {
msg_error "Template still not found after version change"
exit 220
}
else
msg_custom "🚫" "${YW}" "Installation cancelled"
exit 0
fi
else
msg_error "No ${PCT_OSTYPE} templates available"
exit 220
fi
fi
}
# Validate that we found a template
if [[ -z "$TEMPLATE" ]]; then
msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
msg_custom "" "${YW}" "Please check:"
msg_custom " •" "${YW}" "Is pveam catalog available? (run: pveam available -section system)"
msg_custom " •" "${YW}" "Does the template exist for your OS version?"
exit 225
fi
msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
NEED_DOWNLOAD=0
if [[ ! -f "$TEMPLATE_PATH" ]]; then
msg_info "Template not present locally, will download it."
NEED_DOWNLOAD=0
if [[ ! -f "$TEMPLATE_PATH" ]]; then
msg_info "Template not present locally will download."
NEED_DOWNLOAD=1
elif [[ ! -r "$TEMPLATE_PATH" ]]; then
msg_error "Template file exists but is not readable check permissions."
exit 221
elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_warn "Template file too small (<1MB) re-downloading."
NEED_DOWNLOAD=1
elif [[ ! -r "$TEMPLATE_PATH" ]]; then
msg_error "Template file exists but is not readable, check permissions."
exit 221
elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_warn "Template file too small (<1MB), re-downloading."
NEED_DOWNLOAD=1
else
msg_warn "Template looks too small, but no online version exists. Keeping local file."
fi
elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_warn "Template appears corrupted, re-downloading."
NEED_DOWNLOAD=1
else
msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
fi
else
$STD msg_ok "Template $TEMPLATE is present and valid."
msg_warn "Template looks too small, but no online version exists. Keeping local file."
fi
elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_warn "Template appears corrupted re-downloading."
NEED_DOWNLOAD=1
else
msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
fi
else
$STD msg_ok "Template $TEMPLATE is present and valid."
fi
if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
TEMPLATE="$ONLINE_TEMPLATE"
NEED_DOWNLOAD=1
else
msg_custom "" "${BL}" "Continuing with local template $TEMPLATE"
if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
TEMPLATE="$ONLINE_TEMPLATE"
NEED_DOWNLOAD=1
else
msg_custom "" "${BL}" "Continuing with local template $TEMPLATE"
fi
fi
if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
[[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
for attempt in {1..3}; do
msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1; then
msg_ok "Template download successful."
break
fi
fi
if [[ $attempt -eq 3 ]]; then
msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
exit 222
fi
sleep $((attempt * 5))
done
fi
if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
[[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
for attempt in {1..3}; do
msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1; then
msg_ok "Template download successful."
break
fi
if [[ $attempt -eq 3 ]]; then
msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
exit 222
fi
sleep $((attempt * 5))
done
fi
if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
exit 223
fi
if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
exit 223
fi
# ------------------------------------------------------------------------------
@@ -6288,15 +6096,21 @@ create_lxc_container() {
# Validate template before pct create (while holding lock)
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH" 2>/dev/null || echo 0)" -lt 1000000 ]]; then
msg_info "Template file missing or too small - downloading"
msg_info "Template file missing or too small downloading"
rm -f "$TEMPLATE_PATH"
download_template
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || {
msg_error "Failed to download template '$TEMPLATE' to storage '$TEMPLATE_STORAGE'"
exit 222
}
msg_ok "Template downloaded"
elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
if [[ "$ARCH" == "arm64" || -n "$ONLINE_TEMPLATE" ]]; then
msg_info "Template appears corrupted - re-downloading"
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_info "Template appears corrupted re-downloading"
rm -f "$TEMPLATE_PATH"
download_template
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || {
msg_error "Failed to re-download template '$TEMPLATE'"
exit 222
}
msg_ok "Template re-downloaded"
else
msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
@@ -6344,9 +6158,9 @@ create_lxc_container() {
else
# Not a CTID collision - check if template issue and retry with fresh download
if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then
msg_info "Template may be corrupted - re-downloading"
msg_info "Template may be corrupted re-downloading"
rm -f "$TEMPLATE_PATH"
download_template
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1
msg_ok "Template re-downloaded"
fi
@@ -6359,11 +6173,7 @@ create_lxc_container() {
if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
msg_ok "Trying local storage fallback"
msg_info "Downloading template to local"
if [[ "$ARCH" == "arm64" ]]; then
download_arm64_template "$LOCAL_TEMPLATE_PATH"
else
pveam download local "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1
fi
pveam download local "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1
msg_ok "Template downloaded to local"
else
msg_ok "Trying local storage fallback"
@@ -6551,40 +6361,6 @@ EOF
systemctl start ping-instances.service
fi
# Optional host-side post-install hook
# Path comes from var_post_install (default.vars / app.vars / advanced settings).
# Runs ON THE PROXMOX HOST after the container is up and configured.
# Exposed env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG.
# Output (stdout/stderr) is captured to /var/log/community-scripts/post-install-<CTID>.log
if [[ -n "${var_post_install:-}" ]]; then
local _hook_log_dir="/var/log/community-scripts"
local _hook_log="${_hook_log_dir}/post-install-${CTID}.log"
mkdir -p "$_hook_log_dir" 2>/dev/null || true
if [[ ! -f "${var_post_install}" ]]; then
msg_error "Post-install hook not found on host: ${var_post_install}"
whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "POST-INSTALL HOOK FAILED" \
--msgbox "The configured post-install hook was not found on the Proxmox host:\n\n${var_post_install}\n\nThe LXC was created successfully, but the hook did NOT run." 14 72 || true
else
msg_info "Running post-install hook: ${var_post_install}"
local _hook_rc=0
APP="$APP" NSAPP="${NSAPP:-}" CTID="$CTID" IP="$IP" HN="${HN:-}" \
STORAGE="${STORAGE:-}" BRG="${BRG:-}" \
bash "${var_post_install}" >"${_hook_log}" 2>&1 || _hook_rc=$?
if [[ $_hook_rc -eq 0 ]]; then
msg_ok "Post-install hook completed (log: ${_hook_log})"
else
msg_error "Post-install hook failed (rc=${_hook_rc}) see ${_hook_log}"
local _hook_tail=""
_hook_tail="$(tail -n 15 "${_hook_log}" 2>/dev/null || true)"
whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "POST-INSTALL HOOK FAILED" \
--msgbox "Hook exited with code ${_hook_rc}.\n\nScript: ${var_post_install}\nLog: ${_hook_log}\n\n--- Last log lines ---\n${_hook_tail}\n\nThe LXC itself was created successfully." 22 78 || true
fi
fi
fi
INSTALL_COMPLETE=true
post_update_to_api "done" "none"
}

View File

@@ -344,15 +344,9 @@ pve_check() {
# - Provides link to ARM64-compatible scripts
# ------------------------------------------------------------------------------
arch_check() {
local arch
arch="$(dpkg --print-architecture)"
if [[ "$arch" != "amd64" && "$arch" != "arm64" ]]; then
msg_error "This script requires amd64 or arm64 (detected: $arch)."
sleep 2
exit 106
fi
if [[ "$arch" == "arm64" && "${var_arm64:-}" != "yes" ]]; then
msg_error "This script does not yet support arm64."
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
msg_error "This script will not work with PiMox (ARM architecture detected)."
msg_warn "Visit https://github.com/asylumexp/Proxmox for ARM64 support."
sleep 2
exit 106
fi
@@ -1718,38 +1712,6 @@ function get_lxc_ip() {
export LOCAL_IP
}
# ------------------------------------------------------------------------------
# ensure_whiptail()
#
# - Ensures whiptail is installed
# - Some ARM64 systems will not have whiptail installed
# - Exits with error message if installation fails
# ------------------------------------------------------------------------------
# Ensures whiptail is available, installing it via apt when missing.
# Some ARM64 base systems ship without whiptail; exits 100 if installation fails.
ensure_whiptail() {
  if command -v whiptail >/dev/null 2>&1; then
    return 0
  fi
  msg_info "Installing whiptail"
  apt_update_safe
  if ! $STD apt-get install -y whiptail; then
    msg_error "Failed to install whiptail"
    exit 100
  fi
  msg_ok "Installed whiptail"
}
# ------------------------------------------------------------------------------
# arm64_notice()
#
# - Shows a short warning when running scripts on ARM64 systems
# ------------------------------------------------------------------------------
# Shows a short warning dialog when running scripts on ARM64 systems; no-op elsewhere.
arm64_notice() {
  if [[ "$(dpkg --print-architecture)" != "arm64" ]]; then
    return 0
  fi
  whiptail --backtitle "Proxmox VE Helper Scripts" \
    --title "ARM64 SUPPORT" \
    --ok-button "Continue" \
    --msgbox "ARM64 support is in active development.\n\nSome scripts, packages, or application releases may not be fully tested or working yet." 10 68
}
# ==============================================================================
# SIGNAL TRAPS
# ==============================================================================

View File

@@ -99,7 +99,7 @@ if ! declare -f explain_exit_code &>/dev/null; then
103) echo "Validation: Shell is not Bash" ;;
104) echo "Validation: Not running as root (or invoked via sudo)" ;;
105) echo "Validation: Proxmox VE version not supported" ;;
106) echo "Validation: Unsupported architecture (requires amd64 or arm64)" ;;
106) echo "Validation: Architecture not supported (ARM / PiMox)" ;;
107) echo "Validation: Kernel key parameters unreadable" ;;
108) echo "Validation: Kernel key limits exceeded" ;;
109) echo "Proxmox: No available container ID after max attempts" ;;

View File

@@ -3060,14 +3060,10 @@ function fetch_and_deploy_codeberg_release() {
# Fall back to architecture heuristic
if [[ -z "$url_match" ]]; then
for u in $assets; do
[[ "$u" =~ \.deb$ ]] || continue
if [[ "${arch,,}" =~ ^(amd64|x86_64)$ ]]; then
[[ "$u" =~ (amd64|x86_64).*\.deb$ ]] || continue
elif [[ "${arch,,}" =~ ^(arm64|aarch64)$ ]]; then
[[ "$u" =~ (arm64|aarch64).*\.deb$ ]] || continue
if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
url_match="$u"
break
fi
url_match="$u"
break
done
fi
@@ -3364,11 +3360,7 @@ _gh_scan_older_releases() {
done)
fi
if [[ "$has_match" != "true" ]]; then
if [[ "${arch,,}" =~ ^(amd64|x86_64)$ ]]; then
has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].browser_download_url" | grep -qE '(amd64|x86_64).*\.deb$' && echo true)
elif [[ "${arch,,}" =~ ^(arm64|aarch64)$ ]]; then
has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].browser_download_url" | grep -qE '(arm64|aarch64).*\.deb$' && echo true)
fi
has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].browser_download_url" | grep -qE "($arch|amd64|x86_64|aarch64|arm64).*\.deb$" && echo true)
fi
if [[ "$has_match" != "true" ]]; then
has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].browser_download_url" | grep -qE '\.deb$' && echo true)
@@ -3574,14 +3566,10 @@ function fetch_and_deploy_gh_release() {
# If no match via explicit pattern, fall back to architecture heuristic
if [[ -z "$url_match" ]]; then
for u in $assets; do
[[ "$u" =~ \.deb$ ]] || continue
if [[ "${arch,,}" =~ ^(amd64|x86_64)$ ]]; then
[[ "$u" =~ (amd64|x86_64).*\.deb$ ]] || continue
elif [[ "${arch,,}" =~ ^(arm64|aarch64)$ ]]; then
[[ "$u" =~ (arm64|aarch64).*\.deb$ ]] || continue
if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
url_match="$u"
break
fi
url_match="$u"
break
done
fi
@@ -3612,14 +3600,10 @@ function fetch_and_deploy_gh_release() {
fi
if [[ -z "$url_match" ]]; then
for u in $assets; do
[[ "$u" =~ \.deb$ ]] || continue
if [[ "${arch,,}" =~ ^(amd64|x86_64)$ ]]; then
[[ "$u" =~ (amd64|x86_64).*\.deb$ ]] || continue
elif [[ "${arch,,}" =~ ^(arm64|aarch64)$ ]]; then
[[ "$u" =~ (arm64|aarch64).*\.deb$ ]] || continue
if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
url_match="$u"
break
fi
url_match="$u"
break
done
fi
if [[ -z "$url_match" ]]; then
@@ -3987,12 +3971,7 @@ function setup_ffmpeg() {
# Binary fallback mode
if [[ "$TYPE" == "binary" ]]; then
local ffmpeg_arch
case "$(dpkg --print-architecture 2>/dev/null || echo amd64)" in
arm64) ffmpeg_arch="arm64" ;;
*) ffmpeg_arch="amd64" ;;
esac
if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-${ffmpeg_arch}-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then
if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then
msg_error "Failed to download FFmpeg binary"
rm -rf "$TMP_DIR"
return 250
@@ -4079,17 +4058,7 @@ function setup_ffmpeg() {
# If no source download (either VERSION empty or download failed), use binary
if [[ -z "$VERSION" ]]; then
msg_info "Setup FFmpeg from pre-built binary"
local ffmpeg_arch detected_arch
detected_arch="$(dpkg --print-architecture 2>/dev/null || true)"
if [[ -z "$detected_arch" ]]; then
detected_arch="$(uname -m 2>/dev/null || true)"
fi
case "$detected_arch" in
arm64 | aarch64) ffmpeg_arch="arm64" ;;
amd64 | x86_64) ffmpeg_arch="amd64" ;;
*) ffmpeg_arch="amd64" ;;
esac
if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-${ffmpeg_arch}-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then
if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then
msg_error "Failed to download FFmpeg pre-built binary"
rm -rf "$TMP_DIR"
return 250
@@ -8265,19 +8234,7 @@ function setup_yq() {
msg_info "Setup yq $LATEST_VERSION"
fi
local yq_arch detected_arch
if command -v dpkg &>/dev/null; then
detected_arch="$(dpkg --print-architecture 2>/dev/null)"
else
detected_arch="$(uname -m 2>/dev/null)"
fi
case "$detected_arch" in
arm64 | aarch64) yq_arch="arm64" ;;
amd64 | x86_64) yq_arch="amd64" ;;
*) yq_arch="amd64" ;;
esac
if ! curl_with_retry "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_${yq_arch}" "$TMP_DIR/yq"; then
if ! curl_with_retry "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_amd64" "$TMP_DIR/yq"; then
msg_error "Failed to download yq"
rm -rf "$TMP_DIR"
return 250

View File

@@ -147,7 +147,7 @@ EOF
# Create update script
msg_info "Creating update script"
ensure_usr_local_bin_persist
cat <<'EOF' >/usr/local/bin/update_cronmaster
cat <<EOF >/usr/local/bin/update_cronmaster
#!/usr/bin/env bash
# CronMaster Update Script
type=update bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/cronmaster.sh)"

View File

@@ -68,24 +68,6 @@ function uninstall() {
# ==============================================================================
function update() {
if check_for_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter"; then
if [[ "$(printf '%s\n' "2.0.0" "$CHECK_UPDATE_RELEASE" | sort -V | tail -n1)" == "$CHECK_UPDATE_RELEASE" ]] && \
! grep -q "QBITTORRENT_API_KEY" "$CONFIG_PATH" 2>/dev/null; then
echo ""
msg_warn "Version 2.0.0 introduces a breaking change: username/password login has been replaced by an API key."
echo -e "${TAB3}${INFO} You must create an API key in qBittorrent under Tools > Options > Web UI > API key"
echo ""
echo -n "${TAB3}Enter your qBittorrent API key (or press Enter to abort): "
read -r QBITTORRENT_API_KEY
if [[ -z "$QBITTORRENT_API_KEY" ]]; then
msg_warn "No API key provided. Update aborted."
exit 0
fi
sed -i '/^QBITTORRENT_USERNAME=/d' "$CONFIG_PATH"
sed -i '/^QBITTORRENT_PASSWORD=/d' "$CONFIG_PATH"
echo "QBITTORRENT_API_KEY=\"${QBITTORRENT_API_KEY}\"" >>"$CONFIG_PATH"
msg_ok "API key saved to configuration"
fi
msg_info "Stopping service"
if [[ "$OS" == "Alpine" ]]; then
rc-service qbittorrent-exporter stop &>/dev/null
@@ -118,9 +100,10 @@ function update() {
# INSTALL
# ==============================================================================
function install() {
read -erp "${TAB3}Enter URL of qBittorrent, example: (http://127.0.0.1:8080): " QBITTORRENT_BASE_URL
echo -e "${TAB3}${INFO} Create an API key in qBittorrent under Tools > Options > Web UI > API key"
read -erp "${TAB3}Enter qBittorrent API key: " QBITTORRENT_API_KEY
read -erp "Enter URL of qBittorrent, example: (http://127.0.0.1:8080): " QBITTORRENT_BASE_URL
read -erp "Enter qBittorrent username: " QBITTORRENT_USERNAME
read -rsp "Enter qBittorrent password: " QBITTORRENT_PASSWORD
printf "\n"
fetch_and_deploy_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter" "tarball" "latest"
setup_go
@@ -133,7 +116,8 @@ function install() {
cat <<EOF >"$CONFIG_PATH"
# https://github.com/martabal/qbittorrent-exporter?tab=readme-ov-file#parameters
QBITTORRENT_BASE_URL="${QBITTORRENT_BASE_URL}"
QBITTORRENT_API_KEY="${QBITTORRENT_API_KEY}"
QBITTORRENT_USERNAME="${QBITTORRENT_USERNAME}"
QBITTORRENT_PASSWORD="${QBITTORRENT_PASSWORD}"
EOF
msg_ok "Created configuration"

View File

@@ -1,436 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# Community-Scripts ProxmoxVE — Post-Install Hook: Example Library
# ----------------------------------------------------------------------------
# This file is NOT meant to be executed as-is.
# It is a collection of complete, copy-pasteable example hooks for the
# optional `var_post_install` feature in build.func.
#
# HOW IT WORKS
# ------------
# In the ct/*.sh CT scripts (or via Advanced Settings → Step 28) you can
# point `var_post_install` to an absolute path on the Proxmox HOST, e.g.:
#
# # in /root/.community-scripts/default.vars
# var_post_install=/opt/community-scripts/hooks/notify.sh
#
# # OR per-app, in app.vars
# var_post_install=/opt/community-scripts/hooks/vaultwarden-postprovision.sh
#
# # OR interactively in the Advanced Settings whiptail (Step 28).
#
# The hook runs ON THE PROXMOX HOST (NOT inside the LXC) as root,
# AFTER the container is fully provisioned, started and the description
# is set. stdout/stderr is captured to:
#
# /var/log/community-scripts/post-install-<CTID>.log
#
# AVAILABLE ENV VARIABLES
# -----------------------
# APP - Pretty name (e.g. "Vaultwarden")
# NSAPP - Slug / lowercase (e.g. "vaultwarden")
# CTID - Numeric container ID (e.g. "103")
# IP - IPv4 address of the LXC (e.g. "192.168.1.50")
# HN - Hostname (e.g. "vaultwarden")
# STORAGE - Storage where the rootfs lives (e.g. "local-lvm")
# BRG - Bridge (e.g. "vmbr0")
#
# GENERAL TIPS
# ------------
# - Use `set -euo pipefail` so failures actually surface.
# - Use `|| true` on best-effort steps you do not want to abort the hook.
# - The file just needs to be a valid script. `+x` is optional — it is
# invoked via `bash <path>`. Shebang is honored only if you call it
# yourself; otherwise the shebang line is purely cosmetic.
# - If the hook exits non-zero, the user gets a whiptail popup with the
# last 15 log lines. The LXC creation itself is NOT rolled back.
# - Keep hooks idempotent — they may be re-run if you recreate a CT.
#
# HOW TO USE THIS FILE
# --------------------
# 1. Copy ONE example block (between the BEGIN/END markers) into a new
# file on the Proxmox host, e.g. /opt/community-scripts/hooks/notify.sh
# 2. chmod +x /opt/community-scripts/hooks/notify.sh (optional)
# 3. Set var_post_install in default.vars / app.vars or pick the path
# in Advanced Settings.
# ============================================================================
# ============================================================================
# ▼▼▼ EXAMPLE 1 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : minimal-logger.sh
# Purpose : Append every newly created LXC to a single CSV-ish log.
# Difficulty : ⭐ Beginner
# Side effects: Writes to /var/log/community-scripts/created-lxcs.log
# Use case : You just want a paper trail of "what got created when".
# ============================================================================
#!/usr/bin/env bash
# Minimal audit hook: append one semicolon-separated record per created LXC.
# Env (APP, CTID, HN, IP, BRG, STORAGE) is provided by build.func on the host.
set -euo pipefail

readonly LOG_DIR="/var/log/community-scripts"
readonly LOG_FILE="${LOG_DIR}/created-lxcs.log"

mkdir -p "$LOG_DIR"

# Write the column header the first time (or whenever the file is empty).
[[ -s "$LOG_FILE" ]] || printf '%s\n' "timestamp;ctid;app;hostname;ip;bridge;storage" >"$LOG_FILE"

# Build the record first, then append it in a single write.
# date -Iseconds gives an ISO-8601 timestamp (GNU coreutils, present on PVE).
record="$(date -Iseconds);${CTID};${APP};${HN};${IP};${BRG};${STORAGE}"
printf '%s\n' "$record" >>"$LOG_FILE"

echo "Logged ${APP} (CTID=${CTID}) to ${LOG_FILE}"
# ▲▲▲ EXAMPLE 1 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 2 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : discord-gotify-notify.sh
# Purpose : Send a rich Discord embed AND a Gotify push notification
# whenever a new LXC is provisioned.
# Difficulty : ⭐⭐ Intermediate
# Requires : curl on the host (default), reachable webhook URLs.
# Side effects: Outbound HTTPS to Discord + your Gotify server.
# ============================================================================
#!/usr/bin/env bash
# Notification hook: announce a freshly provisioned LXC via a Discord embed
# and a Gotify push. Runs on the PVE host; APP/CTID/HN/IP/BRG/STORAGE come
# from build.func. Both sends are best-effort and never abort the hook.
set -euo pipefail

# --- CONFIG (edit me) -------------------------------------------------------
DISCORD_WEBHOOK="https://discord.com/api/webhooks/XXXXXXXX/YYYYYYYY"
GOTIFY_URL="https://gotify.example.com"
GOTIFY_TOKEN="AbCdEfGhIjKlMnO"
GOTIFY_PRIORITY=5
# ----------------------------------------------------------------------------

# Context for the messages: which Proxmox node provisioned the CT, and when.
NODE="$(hostname -s)"
TS="$(date -Iseconds)"

# --- Discord embed ----------------------------------------------------------
# read -d '' reaches EOF without ever seeing a NUL delimiter, so it exits
# non-zero even though the payload was captured — hence the `|| true`.
read -r -d '' DISCORD_PAYLOAD <<JSON || true
{
"username": "Proxmox - ${NODE}",
"avatar_url": "https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png",
"embeds": [{
"title": "✅ ${APP} LXC created",
"description": "A new community-script LXC has been provisioned on **${NODE}**.",
"color": 3066993,
"timestamp": "${TS}",
"fields": [
{"name": "CTID", "value": "${CTID}", "inline": true},
{"name": "Hostname", "value": "${HN}", "inline": true},
{"name": "App", "value": "${APP}", "inline": true},
{"name": "IP", "value": "${IP}", "inline": true},
{"name": "Bridge", "value": "${BRG}", "inline": true},
{"name": "Storage", "value": "${STORAGE}", "inline": true}
],
"footer": {"text": "community-scripts.org"}
}]
}
JSON

if ! curl -fsS --max-time 10 \
     -X POST \
     -H "Content-Type: application/json" \
     --data "$DISCORD_PAYLOAD" \
     "$DISCORD_WEBHOOK" >/dev/null; then
  echo "WARN: Discord webhook failed (non-fatal)"
fi

# --- Gotify push ------------------------------------------------------------
if ! curl -fsS --max-time 10 \
     -H "X-Gotify-Key: ${GOTIFY_TOKEN}" \
     -F "title=Proxmox: ${APP} LXC created" \
     -F "message=CTID=${CTID} IP=${IP} HN=${HN} on ${NODE}" \
     -F "priority=${GOTIFY_PRIORITY}" \
     "${GOTIFY_URL}/message" >/dev/null; then
  echo "WARN: Gotify push failed (non-fatal)"
fi

echo "Notifications dispatched for CTID=${CTID}"
# ▲▲▲ EXAMPLE 2 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 3 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : auto-pool-tags-backup.sh
# Purpose : Add the new LXC to a Proxmox pool, append cluster-wide tags,
# register a DNS record in pi-hole, and trigger an immediate
# snapshot backup to a configured storage.
# Difficulty : ⭐⭐⭐ Advanced
# Requires : pvesh, pct, vzdump (host-side; available by default on PVE),
# a reachable pi-hole admin API.
# ============================================================================
#!/usr/bin/env bash
# Post-provision hook (runs on the PVE host as root): attach the new CT to a
# resource pool, merge extra tags with the ones build.func already set,
# register a custom DNS record in pi-hole (v6 API), and trigger an initial
# snapshot backup. CTID/HN/IP/APP/NSAPP come from build.func's environment.
set -euo pipefail
# --- CONFIG (edit me) -------------------------------------------------------
TARGET_POOL="auto-lxc"
EXTRA_TAGS=("auto-provisioned" "${NSAPP}") # community-script tag is set by build.func
BACKUP_STORAGE="pbs-main" # set to "" to skip initial backup
PIHOLE_HOST="192.168.1.5"
PIHOLE_PASSWORD="changeme" # web-UI password
DNS_DOMAIN="lan" # FQDN will be ${HN}.${DNS_DOMAIN}
# ----------------------------------------------------------------------------
# 1) Ensure the pool exists, then attach the CT
if ! pvesh get "/pools/${TARGET_POOL}" >/dev/null 2>&1; then
  echo "Creating pool: ${TARGET_POOL}"
  pvesh create /pools --poolid "${TARGET_POOL}" --comment "Auto-created by post-install hook" || true
fi
echo "Adding CTID=${CTID} to pool=${TARGET_POOL}"
pvesh set "/pools/${TARGET_POOL}" --vms "${CTID}" || echo "WARN: pool attach failed (non-fatal)"
# 2) Merge new tags with existing ones (preserve community-script etc.)
# `pct config` prints "tags: a;b;c"; split on ';' into an associative set so
# duplicates collapse. Empty-array expansion below is safe on bash >= 4.4
# (PVE ships bash 5).
CURRENT_TAGS="$(pct config "${CTID}" | awk -F': ' '/^tags:/{print $2}')"
declare -A TAG_SET
IFS=';' read -r -a CUR_ARR <<<"${CURRENT_TAGS:-}"
for t in "${CUR_ARR[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done
for t in "${EXTRA_TAGS[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done
# Re-join the set's keys with ';' (subshell keeps the IFS change local; key
# order is unspecified — tags are a set, PVE does not care).
NEW_TAGS="$(
  IFS=';'
  echo "${!TAG_SET[*]}"
)"
echo "Setting tags: ${NEW_TAGS}"
pct set "${CTID}" --tags "${NEW_TAGS}" || echo "WARN: tag update failed (non-fatal)"
# 3) Register DNS in pi-hole (custom DNS record)
FQDN="${HN}.${DNS_DOMAIN}"
# Fixed: the log line previously concatenated FQDN and IP with no separator.
echo "Registering DNS: ${FQDN} -> ${IP} on pi-hole ${PIHOLE_HOST}"
# Authenticate against the pi-hole v6 API and extract the session id (sid).
SID="$(curl -fsS --max-time 5 \
  -d "pw=${PIHOLE_PASSWORD}" \
  "http://${PIHOLE_HOST}/api/auth" 2>/dev/null |
  sed -nE 's/.*"sid":"([^"]+)".*/\1/p' || true)"
if [[ -n "${SID}" ]]; then
  curl -fsS --max-time 5 -X PUT \
    -H "Content-Type: application/json" \
    -H "sid: ${SID}" \
    -d "{\"hosts\":[\"${IP} ${FQDN}\"]}" \
    "http://${PIHOLE_HOST}/api/config/dns/hosts" >/dev/null ||
    echo "WARN: pi-hole DNS update failed (non-fatal)"
  # Log out again so the session slot is freed (best-effort).
  curl -fsS --max-time 5 -X DELETE -H "sid: ${SID}" "http://${PIHOLE_HOST}/api/auth" >/dev/null || true
else
  echo "WARN: could not obtain pi-hole session (skipping DNS)"
fi
# 4) Initial backup (best-effort, can take a few minutes)
if [[ -n "${BACKUP_STORAGE}" ]]; then
  if pvesh get "/storage/${BACKUP_STORAGE}" >/dev/null 2>&1; then
    echo "Triggering initial backup of CTID=${CTID} to ${BACKUP_STORAGE}"
    vzdump "${CTID}" \
      --storage "${BACKUP_STORAGE}" \
      --mode snapshot \
      --compress zstd \
      --notes-template "Initial backup of ${APP} (CTID=${CTID})" \
      --notification-mode auto ||
      echo "WARN: initial backup failed (non-fatal)"
  else
    echo "Backup storage '${BACKUP_STORAGE}' not found — skipping."
  fi
fi
echo "Post-provision routine complete for ${APP} (CTID=${CTID})"
# ▲▲▲ EXAMPLE 3 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 4 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : inject-ssh-and-monitoring.sh
# Purpose : Push the host's admin SSH key into the new LXC, install the
# Beszel monitoring agent inside the container, and register
# an Uptime-Kuma HTTP push monitor for the LXC's IP.
# Difficulty : ⭐⭐⭐ Advanced
# Requires : pct (host), curl (inside LXC), reachable Beszel hub +
# Uptime-Kuma push URL.
# ============================================================================
#!/usr/bin/env bash
# ----------------------------------------------------------------------------
# Post-install hook (runs on the Proxmox HOST as root, after the CT is up):
#   1) push the host admin SSH public key into the new LXC
#   2) wait for outbound name resolution inside the CT (max 30 s)
#   3) install + enable the Beszel monitoring agent inside the CT
#   4) send one "up" ping to an Uptime-Kuma push monitor
# CTID/HN/APP/IP are exported by build.func before this hook is invoked.
# ----------------------------------------------------------------------------
set -euo pipefail
# --- CONFIG (edit me) -------------------------------------------------------
ADMIN_KEY="/root/.ssh/admin_ed25519.pub"
# NOTE(review): BESZEL_HUB_URL is defined but never referenced below —
# confirm whether the hub discovers agents on its own or this was meant
# to be used during agent setup.
BESZEL_HUB_URL="http://192.168.1.10:8090"
BESZEL_AGENT_KEY="ssh-ed25519 AAAA... beszel@hub" # public key of the hub
UPTIME_KUMA_PUSH_BASE="http://uptime.lan/api/push/abc123"
# ----------------------------------------------------------------------------
# 1) Inject the admin SSH key
# NOTE(review): pct push replaces any existing /root/.ssh/authorized_keys in
# the container rather than appending — confirm that is acceptable here.
if [[ -f "${ADMIN_KEY}" ]]; then
  echo "Pushing admin SSH key into CTID=${CTID}"
  pct exec "${CTID}" -- mkdir -p /root/.ssh
  pct exec "${CTID}" -- chmod 700 /root/.ssh
  pct push "${CTID}" "${ADMIN_KEY}" /root/.ssh/authorized_keys
  pct exec "${CTID}" -- chmod 600 /root/.ssh/authorized_keys
else
  echo "WARN: ${ADMIN_KEY} not found on host — skipping SSH key injection"
fi
# 2) Wait for outbound networking inside the CT (max 30 s)
# DNS resolution of deb.debian.org is used as a cheap "network is up" probe;
# the loop polls once per second and simply falls through after 30 tries.
echo "Waiting for network inside CTID=${CTID}"
for _ in $(seq 1 30); do
  if pct exec "${CTID}" -- bash -c 'getent hosts deb.debian.org >/dev/null 2>&1'; then
    break
  fi
  sleep 1
done
# 3) Install Beszel agent inside the LXC
# The quoted 'AGENT_INSTALL' delimiter keeps the here-doc literal on the host:
# no expansion happens here, the script runs verbatim INSIDE the container.
# The inner (unquoted) UNIT here-doc writes the systemd unit in the CT; the
# KEY value is injected afterwards via sed to avoid nested quoting issues.
echo "Installing Beszel agent inside CTID=${CTID}"
pct exec "${CTID}" -- bash -s <<'AGENT_INSTALL' || echo "WARN: Beszel install failed"
set -euo pipefail
ARCH="$(uname -m)"
case "$ARCH" in
x86_64) ARCH_TAG=amd64 ;;
aarch64) ARCH_TAG=arm64 ;;
*) echo "Unsupported arch: $ARCH"; exit 1 ;;
esac
TMP=$(mktemp -d)
cd "$TMP"
curl -fsSL "https://github.com/henrygd/beszel/releases/latest/download/beszel-agent_linux_${ARCH_TAG}.tar.gz" \
| tar -xz
install -m 0755 beszel-agent /usr/local/bin/beszel-agent
cat >/etc/systemd/system/beszel-agent.service <<UNIT
[Unit]
Description=Beszel Agent
After=network-online.target
Wants=network-online.target
[Service]
Environment="PORT=45876"
Environment="KEY=__KEY_PLACEHOLDER__"
ExecStart=/usr/local/bin/beszel-agent
Restart=always
[Install]
WantedBy=multi-user.target
UNIT
AGENT_INSTALL
# Inject the configured public key into the unit file (avoids quoting hell)
# NOTE(review): sed uses '|' as its delimiter — this breaks if the key ever
# contains '|'; SSH public keys normally do not.
pct exec "${CTID}" -- sed -i "s|__KEY_PLACEHOLDER__|${BESZEL_AGENT_KEY}|" \
  /etc/systemd/system/beszel-agent.service
pct exec "${CTID}" -- systemctl daemon-reload
pct exec "${CTID}" -- systemctl enable --now beszel-agent.service ||
  echo "WARN: could not start beszel-agent"
# 4) Register an Uptime-Kuma push monitor (host-side, just sends one ping)
echo "Pinging Uptime-Kuma push monitor for ${HN}"
curl -fsS --max-time 5 \
  --get \
  --data-urlencode "status=up" \
  --data-urlencode "msg=created by community-scripts" \
  --data-urlencode "ping=1" \
  --data-urlencode "label=${HN}" \
  "${UPTIME_KUMA_PUSH_BASE}" >/dev/null ||
  echo "WARN: Uptime-Kuma push failed (non-fatal)"
echo "Provisioned monitoring for ${APP} (CTID=${CTID}, IP=${IP})"
# ▲▲▲ EXAMPLE 4 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 5 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : per-app-router.sh
# Purpose : Single dispatcher hook that runs different actions
# depending on the app being installed (NSAPP). Useful when
# you want ONE hook for the whole cluster but distinct
# behavior for, e.g., databases vs media services.
# Difficulty : ⭐⭐⭐ Advanced
# ============================================================================
#!/usr/bin/env bash
# Dispatcher hook entry: a single var_post_install hook for the whole cluster
# that branches on NSAPP (the app slug) further below. Runs on the PVE host.
set -euo pipefail
# --- CONFIG (edit me) -------------------------------------------------------
DEFAULT_DNS_SUFFIX="lan" # FQDNs below are formed as <HN>.<this suffix>
PROM_FILE_SD_DIR="/etc/prometheus/file_sd" # on the host that runs Prometheus
# ----------------------------------------------------------------------------
# Timestamped logger: prints "[HH:MM:SS] <all args joined by spaces>".
log() {
  local stamp
  stamp="$(date +%H:%M:%S)"
  printf '[%s] %s\n' "$stamp" "$*"
}
# ---------- shared helpers --------------------------------------------------
# Register ${IP}:<port> as a Prometheus file_sd target for job <job>.
# Rewrites ${PROM_FILE_SD_DIR}/<job>.json in place; the embedded python
# removes any stale entry for the same target before appending, so the
# operation is idempotent.
register_prometheus_target() {
  local job_name="$1"
  local target_port="$2"
  local sd_file="${PROM_FILE_SD_DIR}/${job_name}.json"
  mkdir -p "${PROM_FILE_SD_DIR}"
  # Seed an empty JSON array on first use so json.load always succeeds.
  [[ -f "$sd_file" ]] || echo "[]" >"$sd_file"
  python3 - "$sd_file" "${IP}:${target_port}" "${HN}" "${NSAPP}" <<'PY'
import json, sys
path, target, hn, app = sys.argv[1:5]
data = json.load(open(path))
# Avoid duplicates
data = [b for b in data if target not in b.get("targets", [])]
data.append({"targets": [target], "labels": {"hostname": hn, "app": app}})
json.dump(data, open(path, "w"), indent=2)
PY
  log "Registered Prometheus target ${IP}:${target_port} in ${sd_file}"
}
# Best-effort resize + description update for the freshly created CT.
# Failures are deliberately ignored: cosmetic tuning must not abort the hook.
set_ct_options() {
  local core_count="$1"
  local memory_mb="$2"
  local description="$3"
  if ! pct set "${CTID}" --cores "${core_count}" --memory "${memory_mb}"; then :; fi
  if ! pct set "${CTID}" --description "${description}"; then :; fi
}
# ---------- per-app dispatch ------------------------------------------------
# Route on the app slug (NSAPP). Every external call in each arm is guarded
# with `|| true` / `|| log`, so one failing integration never aborts the hook.
log "Dispatching post-install for NSAPP=${NSAPP} CTID=${CTID}"
case "${NSAPP}" in
  # ------ Databases ---------------------------------------------------------
  postgresql | mariadb | mongodb | redis | valkey)
    log "Database role: bumping resources & adding to backup-critical pool"
    set_ct_options 4 4096 "DB: ${APP}"
    # Pool "db-critical" must already exist; errors are silently ignored.
    pvesh set /pools/db-critical --vms "${CTID}" 2>/dev/null || true
    # NOTE(review): 9187 is the postgres exporter's conventional port — the
    # other listed databases' exporters usually listen elsewhere; confirm.
    register_prometheus_target "${NSAPP}-exporter" 9187
    ;;
  # ------ *arr media stack --------------------------------------------------
  sonarr | radarr | prowlarr | lidarr | readarr | bazarr)
    log "Media-arr role: tagging + Sonarr/Radarr API webhook"
    # Overwrites the CT's tag list (does not merge with existing tags).
    pct set "${CTID}" --tags "community-script;media;arr-stack" || true
    curl -fsS --max-time 5 -X POST \
      "http://media-hub.${DEFAULT_DNS_SUFFIX}/hooks/arr-added" \
      -H "Content-Type: application/json" \
      -d "{\"app\":\"${NSAPP}\",\"ctid\":${CTID},\"ip\":\"${IP}\"}" \
      >/dev/null || log "WARN: media-hub webhook failed"
    ;;
  # ------ Web apps that should sit behind NPM/Traefik ----------------------
  vaultwarden | paperless-ngx | nextcloud | immich | bookstack)
    log "Web app role: registering reverse-proxy entry"
    # The JSON body is built by the inner here-doc so the nested quotes stay
    # readable; ${HN}/${IP}/${NSAPP} expand on the host before the POST.
    curl -fsS --max-time 5 -X POST \
      "http://traefik.${DEFAULT_DNS_SUFFIX}/api/dynamic-add" \
      -H "Content-Type: application/json" \
      -d "$(
        cat <<JSON
{
"name": "${HN}",
"host": "${HN}.${DEFAULT_DNS_SUFFIX}",
"backend": "http://${IP}",
"app": "${NSAPP}"
}
JSON
      )" >/dev/null || log "WARN: traefik registration failed"
    # Probe the web UI over HTTP via the blackbox exporter's file_sd job.
    register_prometheus_target "blackbox-http" 80
    ;;
  # ------ Default fallback --------------------------------------------------
  *)
    log "No special handling for ${NSAPP} — applying generic defaults"
    register_prometheus_target "node-exporter" 9100
    ;;
esac
log "Finished dispatcher for ${APP} (CTID=${CTID})"
# ▲▲▲ EXAMPLE 5 — END ▲▲▲
# ============================================================================
# END OF EXAMPLES
# ============================================================================