mirror of
https://github.com/community-scripts/ProxmoxVE.git
synced 2026-05-04 15:48:50 +02:00
Compare commits
4 Commits
feat/post-
...
gitlab_sup
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e0b327c2b6 | ||
|
|
6ba9a7f76e | ||
|
|
d671227310 | ||
|
|
28c936298d |
23
CHANGELOG.md
23
CHANGELOG.md
@@ -448,32 +448,11 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
|
||||
|
||||
</details>
|
||||
|
||||
## 2026-05-01
|
||||
|
||||
### 🚀 Updated Scripts
|
||||
|
||||
- #### 🐞 Bug Fixes
|
||||
|
||||
- paperless-ngx: refresh NLTK data on update [@kurtislanderson](https://github.com/kurtislanderson) ([#14144](https://github.com/community-scripts/ProxmoxVE/pull/14144))
|
||||
- [Pelican Panel] stop deleting the public storage [@LetterN](https://github.com/LetterN) ([#14145](https://github.com/community-scripts/ProxmoxVE/pull/14145))
|
||||
|
||||
- #### 🔧 Refactor
|
||||
|
||||
- Mail-Archiver: update dependencies [@tremor021](https://github.com/tremor021) ([#14152](https://github.com/community-scripts/ProxmoxVE/pull/14152))
|
||||
|
||||
## 2026-04-30
|
||||
|
||||
### 🆕 New Scripts
|
||||
|
||||
- Nagios ([#14126](https://github.com/community-scripts/ProxmoxVE/pull/14126))
|
||||
- Neko ([#14121](https://github.com/community-scripts/ProxmoxVE/pull/14121))
|
||||
|
||||
### 🚀 Updated Scripts
|
||||
|
||||
- #### 🐞 Bug Fixes
|
||||
|
||||
- alpine-docker: install openssl as core dependency | alpine-komodo: check & install openssl if missing [@MickLesk](https://github.com/MickLesk) ([#14134](https://github.com/community-scripts/ProxmoxVE/pull/14134))
|
||||
- endurain: update source references to Codeberg [@MickLesk](https://github.com/MickLesk) ([#14128](https://github.com/community-scripts/ProxmoxVE/pull/14128))
|
||||
- Neko ([#14121](https://github.com/community-scripts/ProxmoxVE/pull/14121))
|
||||
|
||||
### 💾 Core
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: johanngrobe
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://codeberg.org/endurain-project/endurain
|
||||
# Source: https://github.com/joaovitoriasilva/endurain
|
||||
|
||||
APP="Endurain"
|
||||
var_tags="${var_tags:-sport;social-media}"
|
||||
@@ -28,7 +28,7 @@ function update_script() {
|
||||
msg_error "No ${APP} installation found!"
|
||||
exit 233
|
||||
fi
|
||||
if check_for_codeberg_release "endurain" "endurain-project/endurain"; then
|
||||
if check_for_gh_release "endurain" "endurain-project/endurain"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop endurain
|
||||
msg_ok "Stopped Service"
|
||||
@@ -38,7 +38,7 @@ function update_script() {
|
||||
cp /opt/endurain/frontend/app/dist/env.js /opt/endurain.env.js
|
||||
msg_ok "Created Backup"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_codeberg_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain"
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain"
|
||||
|
||||
msg_info "Preparing Update"
|
||||
cd /opt/endurain
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
_ __ _
|
||||
/ | / /___ _____ _(_)___ _____
|
||||
/ |/ / __ `/ __ `/ / __ \/ ___/
|
||||
/ /| / /_/ / /_/ / / /_/ (__ )
|
||||
/_/ |_/\__,_/\__, /_/\____/____/
|
||||
/____/
|
||||
@@ -28,8 +28,6 @@ function update_script() {
|
||||
exit
|
||||
fi
|
||||
|
||||
ensure_dependencies libgssapi-krb5-2
|
||||
|
||||
if check_for_gh_release "mail-archiver" "s1t5/mail-archiver"; then
|
||||
msg_info "Stopping Mail-Archiver"
|
||||
systemctl stop mail-archiver
|
||||
|
||||
90
ct/nagios.sh
90
ct/nagios.sh
@@ -1,90 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: CanbiZ (MickLesk)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/NagiosEnterprises/nagioscore
|
||||
|
||||
APP="Nagios"
|
||||
var_tags="${var_tags:-monitoring;alerts;infrastructure}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-2048}"
|
||||
var_disk="${var_disk:-20}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -f /usr/local/nagios/etc/nagios.cfg ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
msg_info "Backing up Configuration"
|
||||
cp -a /usr/local/nagios/etc /opt/nagios-etc-backup
|
||||
msg_ok "Backed up Configuration"
|
||||
|
||||
if check_for_gh_release "nagios" "NagiosEnterprises/nagioscore"; then
|
||||
msg_info "Stopping Nagios"
|
||||
systemctl stop nagios
|
||||
msg_ok "Stopped Nagios"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nagios" "NagiosEnterprises/nagioscore" "tarball"
|
||||
|
||||
msg_info "Building Nagios Core"
|
||||
cd /opt/nagios
|
||||
$STD ./configure --with-httpd-conf=/etc/apache2/sites-enabled
|
||||
$STD make all
|
||||
$STD make install-groups-users
|
||||
usermod -a -G nagios www-data
|
||||
$STD make install
|
||||
$STD make install-daemoninit
|
||||
$STD make install-commandmode
|
||||
$STD make install-webconf
|
||||
$STD a2enmod rewrite
|
||||
$STD a2enmod cgi
|
||||
msg_ok "Built Nagios Core"
|
||||
|
||||
msg_info "Starting Nagios"
|
||||
systemctl restart apache2
|
||||
systemctl start nagios
|
||||
msg_ok "Started Nagios"
|
||||
fi
|
||||
|
||||
if check_for_gh_release "nagios-plugins" "nagios-plugins/nagios-plugins"; then
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nagios-plugins" "nagios-plugins/nagios-plugins" "tarball"
|
||||
msg_info "Building Nagios Plugins"
|
||||
cd /opt/nagios-plugins
|
||||
$STD ./tools/setup
|
||||
$STD ./configure
|
||||
$STD make
|
||||
$STD make install
|
||||
msg_ok "Built Nagios Plugins"
|
||||
fi
|
||||
|
||||
msg_info "Restoring Configuration"
|
||||
rm -rf /usr/local/nagios/etc
|
||||
cp -a /opt/nagios-etc-backup /usr/local/nagios/etc
|
||||
rm -rf /opt/nagios-etc-backup
|
||||
msg_ok "Restored Configuration"
|
||||
msg_ok "Updated successfully!"
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed Successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}/nagios${CL}"
|
||||
@@ -164,14 +164,6 @@ function update_script() {
|
||||
fi
|
||||
fi
|
||||
|
||||
msg_info "Updating NLTK Data"
|
||||
cd /opt/paperless
|
||||
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data snowball_data
|
||||
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data stopwords
|
||||
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt_tab ||
|
||||
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt
|
||||
msg_ok "Updated NLTK Data"
|
||||
|
||||
msg_info "Starting all Paperless-ngx Services"
|
||||
systemctl start paperless-consumer paperless-webserver paperless-scheduler paperless-task-queue
|
||||
sleep 1
|
||||
|
||||
@@ -45,21 +45,15 @@ function update_script() {
|
||||
$STD php artisan down
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
cp -a /opt/pelican-panel/.env /opt/backup
|
||||
cp -a /opt/pelican-panel/storage/app/public /opt/backup/storage/app/
|
||||
|
||||
cp -r /opt/pelican-panel/.env /opt/
|
||||
SQLITE_INSTALL=$(ls /opt/pelican-panel/database/*.sqlite 1>/dev/null 2>&1 && echo "true" || echo "false")
|
||||
$SQLITE_INSTALL && cp -r /opt/pelican-panel/database/*.sqlite /opt/backup
|
||||
|
||||
find /opt/pelican-panel -mindepth 1 -maxdepth 1 ! -name 'backup' ! -name 'plugins' -exec rm -rf {} +
|
||||
|
||||
$SQLITE_INSTALL && cp -r /opt/pelican-panel/database/*.sqlite /opt/
|
||||
rm -rf * .*
|
||||
fetch_and_deploy_gh_release "pelican-panel" "pelican-dev/panel" "prebuild" "latest" "/opt/pelican-panel" "panel.tar.gz"
|
||||
|
||||
msg_info "Updating Pelican Panel"
|
||||
cp -a /opt/backup/.env /opt/pelican-panel/
|
||||
$SQLITE_INSTALL && mv /opt/backup/*.sqlite /opt/pelican-panel/database/
|
||||
cp -a /opt/backup/storage/app/public /opt/pelican-panel/storage/app/
|
||||
|
||||
mv /opt/.env /opt/pelican-panel/
|
||||
$SQLITE_INSTALL && mv /opt/*.sqlite /opt/pelican-panel/database/
|
||||
$STD composer install --no-dev --optimize-autoloader --no-interaction
|
||||
$STD php artisan p:environment:setup
|
||||
$STD php artisan view:clear
|
||||
|
||||
@@ -14,7 +14,7 @@ network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apk add tzdata openssl
|
||||
$STD apk add tzdata
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
msg_info "Installing Docker"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: johanngrobe
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://codeberg.org/endurain-project/endurain
|
||||
# Source: https://github.com/joaovitoriasilva/endurain
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
@@ -21,7 +21,7 @@ PYTHON_VERSION="3.13" setup_uv
|
||||
NODE_VERSION="24" setup_nodejs
|
||||
PG_VERSION="17" PG_MODULES="postgis" setup_postgresql
|
||||
PG_DB_NAME="enduraindb" PG_DB_USER="endurain" setup_postgresql_db
|
||||
fetch_and_deploy_codeberg_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain"
|
||||
fetch_and_deploy_gh_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain"
|
||||
|
||||
msg_info "Setting up Endurain"
|
||||
cd /opt/endurain
|
||||
|
||||
@@ -22,8 +22,7 @@ setup_deb822_repo \
|
||||
"main"
|
||||
$STD apt install -y \
|
||||
dotnet-sdk-10.0 \
|
||||
aspnetcore-runtime-8.0 \
|
||||
libgssapi-krb5-2
|
||||
aspnetcore-runtime-8.0
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
PG_VERSION="17" setup_postgresql
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: CanbiZ (MickLesk)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/NagiosEnterprises/nagioscore
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
autoconf \
|
||||
automake \
|
||||
build-essential \
|
||||
bc \
|
||||
dc \
|
||||
gawk \
|
||||
gettext \
|
||||
gperf \
|
||||
libgd-dev \
|
||||
libmcrypt-dev \
|
||||
libnet-snmp-perl \
|
||||
libssl-dev \
|
||||
snmp \
|
||||
apache2 \
|
||||
apache2-utils
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
PHP_APACHE="YES" setup_php
|
||||
|
||||
fetch_and_deploy_gh_release "nagios" "NagiosEnterprises/nagioscore" "tarball"
|
||||
|
||||
msg_info "Building Nagios Core"
|
||||
cd /opt/nagios
|
||||
$STD ./configure --with-httpd-conf=/etc/apache2/sites-enabled
|
||||
$STD make all
|
||||
$STD make install-groups-users
|
||||
usermod -a -G nagios www-data
|
||||
$STD make install
|
||||
$STD make install-daemoninit
|
||||
$STD make install-commandmode
|
||||
$STD make install-config
|
||||
$STD make install-webconf
|
||||
$STD a2enmod rewrite
|
||||
$STD a2enmod cgi
|
||||
msg_ok "Built Nagios Core"
|
||||
|
||||
fetch_and_deploy_gh_release "nagios-plugins" "nagios-plugins/nagios-plugins" "tarball"
|
||||
|
||||
msg_info "Building Nagios Plugins"
|
||||
cd /opt/nagios-plugins
|
||||
$STD ./tools/setup
|
||||
$STD ./configure
|
||||
$STD make
|
||||
$STD make install
|
||||
msg_ok "Built Nagios Plugins"
|
||||
|
||||
msg_info "Configuring Web Authentication"
|
||||
$STD htpasswd -bc /usr/local/nagios/etc/htpasswd.users nagiosadmin nagiosadmin
|
||||
chown root:www-data /usr/local/nagios/etc/htpasswd.users
|
||||
chmod 640 /usr/local/nagios/etc/htpasswd.users
|
||||
msg_ok "Configured Web Authentication"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl enable -q apache2
|
||||
systemctl restart apache2
|
||||
systemctl enable -q --now nagios
|
||||
msg_ok "Started Services"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
109
misc/build.func
109
misc/build.func
@@ -1062,7 +1062,6 @@ load_vars_file() {
|
||||
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
|
||||
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
|
||||
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain
|
||||
var_post_install
|
||||
)
|
||||
|
||||
# Whitelist check helper
|
||||
@@ -1280,7 +1279,6 @@ default_var_settings() {
|
||||
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
|
||||
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
|
||||
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
|
||||
var_post_install
|
||||
)
|
||||
|
||||
# Snapshot: environment variables (highest precedence)
|
||||
@@ -1376,11 +1374,6 @@ var_verbose=no
|
||||
# GitHub Personal Access Token (optional – avoids API rate limits during installs)
|
||||
# Create at https://github.com/settings/tokens – read-only public access is sufficient
|
||||
# var_github_token=ghp_your_token_here
|
||||
|
||||
# Optional post-install script (host-side path to a *.sh on the Proxmox host)
|
||||
# Runs ON THE HOST after the container is fully provisioned.
|
||||
# Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG
|
||||
# var_post_install=/opt/post-install/myhook.sh
|
||||
EOF
|
||||
|
||||
# Now choose storages (always prompt unless just one exists)
|
||||
@@ -1459,7 +1452,6 @@ if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
|
||||
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
|
||||
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
|
||||
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain
|
||||
var_post_install
|
||||
)
|
||||
fi
|
||||
|
||||
@@ -1672,7 +1664,6 @@ _build_current_app_vars_tmp() {
|
||||
|
||||
[ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
|
||||
[ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
|
||||
[ -n "${var_post_install:-}" ] && echo "var_post_install=$(_sanitize_value "${var_post_install}")"
|
||||
} >"$tmpf"
|
||||
|
||||
echo "$tmpf"
|
||||
@@ -1817,7 +1808,7 @@ advanced_settings() {
|
||||
TAGS="community-script${var_tags:+;${var_tags}}"
|
||||
fi
|
||||
local STEP=1
|
||||
local MAX_STEP=29
|
||||
local MAX_STEP=28
|
||||
|
||||
# Store values for back navigation - inherit from var_* app defaults
|
||||
local _ct_type="${var_unprivileged:-1}"
|
||||
@@ -1851,7 +1842,6 @@ advanced_settings() {
|
||||
local _enable_mknod="${var_mknod:-0}"
|
||||
local _mount_fs="${var_mount_fs:-}"
|
||||
local _protect_ct="${var_protection:-no}"
|
||||
local _post_install="${var_post_install:-}"
|
||||
|
||||
# Detect host timezone for default (if not set via var_timezone)
|
||||
local _host_timezone=""
|
||||
@@ -2709,61 +2699,9 @@ advanced_settings() {
|
||||
;;
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# STEP 28: Optional host-side post-install hook (path on the Proxmox HOST)
|
||||
# STEP 28: Verbose Mode & Confirmation
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
28)
|
||||
local _hook_prompt="Optional: absolute path to a *.sh file ON THE PROXMOX HOST.
|
||||
|
||||
It runs as root on the HOST (NOT in the LXC) after the container
|
||||
is fully provisioned and started.
|
||||
|
||||
Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG.
|
||||
|
||||
Leave empty to skip."
|
||||
while true; do
|
||||
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
|
||||
--title "POST-INSTALL HOOK (HOST)" \
|
||||
--ok-button "Next" --cancel-button "Back" \
|
||||
--inputbox "$_hook_prompt" 16 70 "${_post_install}" \
|
||||
3>&1 1>&2 2>&3); then
|
||||
# Normalize: strip surrounding whitespace
|
||||
result="$(printf '%s' "$result" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
|
||||
if [[ -z "$result" ]]; then
|
||||
_post_install=""
|
||||
((STEP++))
|
||||
break
|
||||
fi
|
||||
# Reject obvious shell-meta sneaking through
|
||||
if [[ "$result" == *';'* || "$result" == *'$('* || "$result" == *'`'* || "$result" == *'&&'* || "$result" == *'||'* ]]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \
|
||||
--msgbox "Path contains shell metacharacters. Please provide a plain absolute file path." 10 70
|
||||
continue
|
||||
fi
|
||||
if [[ "$result" != /* ]]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \
|
||||
--msgbox "Path must be absolute (start with /).\n\nGot: $result" 10 70
|
||||
continue
|
||||
fi
|
||||
if [[ ! -f "$result" ]]; then
|
||||
if ! whiptail --backtitle "Proxmox VE Helper Scripts" --title "FILE NOT FOUND" \
|
||||
--yesno "File does not exist on host:\n\n$result\n\nKeep this path anyway?" 12 70; then
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
_post_install="$result"
|
||||
((STEP++))
|
||||
break
|
||||
else
|
||||
((STEP--))
|
||||
break
|
||||
fi
|
||||
done
|
||||
;;
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# STEP 29: Verbose Mode & Confirmation
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
29)
|
||||
local verbose_default_flag="--defaultno"
|
||||
[[ "$_verbose" == "yes" ]] && verbose_default_flag=""
|
||||
|
||||
@@ -2792,11 +2730,6 @@ Leave empty to skip."
|
||||
local apt_display="${_apt_cacher:-no}"
|
||||
[[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip"
|
||||
|
||||
local post_install_display="${_post_install:-(none)}"
|
||||
local post_install_warn=""
|
||||
[[ -n "$_post_install" ]] && post_install_warn="
|
||||
⚠ Hook runs as root on Proxmox HOST (not in LXC)"
|
||||
|
||||
local summary="Container Type: $ct_type_desc
|
||||
Container ID: $_ct_id
|
||||
Hostname: $_hostname
|
||||
@@ -2820,8 +2753,7 @@ Features:
|
||||
Advanced:
|
||||
Timezone: $tz_display
|
||||
APT Cacher: $apt_display
|
||||
Verbose: $_verbose
|
||||
Post-Install Script: ${post_install_display}${post_install_warn}"
|
||||
Verbose: $_verbose"
|
||||
|
||||
if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
|
||||
--title "CONFIRM SETTINGS" \
|
||||
@@ -2864,7 +2796,6 @@ Advanced:
|
||||
APT_CACHER="$_apt_cacher"
|
||||
APT_CACHER_IP="$_apt_cacher_ip"
|
||||
VERBOSE="$_verbose"
|
||||
var_post_install="$_post_install"
|
||||
|
||||
# Update var_* based on user choice (for functions that check these)
|
||||
var_gpu="$_enable_gpu"
|
||||
@@ -6374,40 +6305,6 @@ EOF
|
||||
systemctl start ping-instances.service
|
||||
fi
|
||||
|
||||
# Optional host-side post-install hook
|
||||
# Path comes from var_post_install (default.vars / app.vars / advanced settings).
|
||||
# Runs ON THE PROXMOX HOST after the container is up and configured.
|
||||
# Exposed env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG.
|
||||
# Output (stdout/stderr) is captured to /var/log/community-scripts/post-install-<CTID>.log
|
||||
if [[ -n "${var_post_install:-}" ]]; then
|
||||
local _hook_log_dir="/var/log/community-scripts"
|
||||
local _hook_log="${_hook_log_dir}/post-install-${CTID}.log"
|
||||
mkdir -p "$_hook_log_dir" 2>/dev/null || true
|
||||
|
||||
if [[ ! -f "${var_post_install}" ]]; then
|
||||
msg_error "Post-install hook not found on host: ${var_post_install}"
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" \
|
||||
--title "POST-INSTALL HOOK FAILED" \
|
||||
--msgbox "The configured post-install hook was not found on the Proxmox host:\n\n${var_post_install}\n\nThe LXC was created successfully, but the hook did NOT run." 14 72 || true
|
||||
else
|
||||
msg_info "Running post-install hook: ${var_post_install}"
|
||||
local _hook_rc=0
|
||||
APP="$APP" NSAPP="${NSAPP:-}" CTID="$CTID" IP="$IP" HN="${HN:-}" \
|
||||
STORAGE="${STORAGE:-}" BRG="${BRG:-}" \
|
||||
bash "${var_post_install}" >"${_hook_log}" 2>&1 || _hook_rc=$?
|
||||
if [[ $_hook_rc -eq 0 ]]; then
|
||||
msg_ok "Post-install hook completed (log: ${_hook_log})"
|
||||
else
|
||||
msg_error "Post-install hook failed (rc=${_hook_rc}) – see ${_hook_log}"
|
||||
local _hook_tail=""
|
||||
_hook_tail="$(tail -n 15 "${_hook_log}" 2>/dev/null || true)"
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" \
|
||||
--title "POST-INSTALL HOOK FAILED" \
|
||||
--msgbox "Hook exited with code ${_hook_rc}.\n\nScript: ${var_post_install}\nLog: ${_hook_log}\n\n--- Last log lines ---\n${_hook_tail}\n\nThe LXC itself was created successfully." 22 78 || true
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
INSTALL_COMPLETE=true
|
||||
post_update_to_api "done" "none"
|
||||
}
|
||||
|
||||
651
misc/tools.func
651
misc/tools.func
@@ -8665,3 +8665,654 @@ EOF
|
||||
$STD apt update
|
||||
return 0
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Get latest GitLab release version.
|
||||
# Usage: get_latest_gitlab_release "owner/repo" [strip_v]
|
||||
# ------------------------------------------------------------------------------
|
||||
get_latest_gitlab_release() {
|
||||
local repo="$1"
|
||||
local strip_v="${2:-true}"
|
||||
|
||||
local repo_encoded
|
||||
repo_encoded=$(printf '%s' "$repo" | sed 's|/|%2F|g')
|
||||
|
||||
local header=()
|
||||
[[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")
|
||||
|
||||
local temp_file
|
||||
temp_file=$(mktemp)
|
||||
|
||||
local http_code
|
||||
http_code=$(curl --connect-timeout 10 --max-time 30 -sSL \
|
||||
-w "%{http_code}" -o "$temp_file" \
|
||||
"${header[@]}" \
|
||||
"https://gitlab.com/api/v4/projects/$repo_encoded/releases?per_page=1&order_by=released_at&sort=desc" 2>/dev/null) || true
|
||||
|
||||
if [[ "$http_code" != "200" ]]; then
|
||||
rm -f "$temp_file"
|
||||
msg_warn "GitLab API call failed for ${repo} (HTTP ${http_code})"
|
||||
return 22
|
||||
fi
|
||||
|
||||
local version
|
||||
version=$(jq -r '.[0].tag_name // empty' "$temp_file")
|
||||
rm -f "$temp_file"
|
||||
|
||||
if [[ -z "$version" ]]; then
|
||||
msg_error "Could not determine latest version for ${repo}"
|
||||
return 250
|
||||
fi
|
||||
|
||||
if [[ "$strip_v" == "true" ]]; then
|
||||
[[ "$version" =~ ^v[0-9] ]] && version="${version:1}"
|
||||
fi
|
||||
|
||||
echo "$version"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Checks for new GitLab release (latest tag).
|
||||
#
|
||||
# Description:
|
||||
# - Queries the GitLab API for the latest release tag
|
||||
# - Compares it to a local cached version (~/.<app>)
|
||||
# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
|
||||
#
|
||||
# Usage:
|
||||
# if check_for_gl_release "myapp" "owner/repo" [optional] "v1.2.3"; then
|
||||
# # trigger update...
|
||||
# fi
|
||||
# exit 0
|
||||
# } (end of update_script not from the function)
|
||||
#
|
||||
# Notes:
|
||||
# - Requires `jq` (auto-installed if missing)
|
||||
# - Supports GITLAB_TOKEN env var for private/rate-limited repos
|
||||
# - Does not modify anything, only checks version state
|
||||
# ------------------------------------------------------------------------------
|
||||
check_for_gl_release() {
|
||||
local app="$1"
|
||||
local source="$2"
|
||||
local pinned_version_in="${3:-}" # optional
|
||||
local pin_reason="${4:-}" # optional reason shown to user
|
||||
local app_lc="${app,,}"
|
||||
local current_file="$HOME/.${app_lc}"
|
||||
|
||||
msg_info "Checking for update: ${app}"
|
||||
|
||||
# DNS check
|
||||
if ! getent hosts gitlab.com >/dev/null 2>&1; then
|
||||
msg_error "Network error: cannot resolve gitlab.com"
|
||||
return 6
|
||||
fi
|
||||
|
||||
ensure_dependencies jq
|
||||
|
||||
local repo_encoded
|
||||
repo_encoded=$(printf '%s' "$repo" | sed 's|/|%2F|g')
|
||||
echo "$source" | sed 's|/|%2F|g')
|
||||
|
||||
local header=()
|
||||
[[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")
|
||||
|
||||
local releases_json="" http_code=""
|
||||
|
||||
# For pinned versions, try to fetch the specific release tag first
|
||||
if [[ -n "$pinned_version_in" ]]; then
|
||||
local pinned_encoded="${pinned_version_in//\//%2F}"
|
||||
http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gl_check.json \
|
||||
"${header[@]}" \
|
||||
"https://gitlab.com/api/v4/projects/$repo_encoded/releases/$pinned_encoded" 2>/dev/null) || true
|
||||
if [[ "$http_code" == "200" ]] && [[ -s /tmp/gl_check.json ]]; then
|
||||
releases_json="[$(</tmp/gl_check.json)]"
|
||||
fi
|
||||
rm -f /tmp/gl_check.json
|
||||
fi
|
||||
|
||||
# Fetch full releases list if needed
|
||||
if [[ -z "$releases_json" ]]; then
|
||||
http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gl_check.json \
|
||||
"${header[@]}" \
|
||||
"https://gitlab.com/api/v4/projects/$repo_encoded/releases?per_page=100&order_by=released_at&sort=desc" 2>/dev/null) || true
|
||||
|
||||
if [[ "$http_code" == "200" ]] && [[ -s /tmp/gl_check.json ]]; then
|
||||
releases_json=$(</tmp/gl_check.json)
|
||||
elif [[ "$http_code" == "401" ]]; then
|
||||
msg_error "GitLab API authentication failed (HTTP 401)."
|
||||
if [[ -n "${GITLAB_TOKEN:-}" ]]; then
|
||||
msg_error "Your GITLAB_TOKEN appears to be invalid or expired."
|
||||
else
|
||||
msg_error "The repository may require authentication. Try: export GITLAB_TOKEN=\"glpat-your_token\""
|
||||
fi
|
||||
rm -f /tmp/gl_check.json
|
||||
return 22
|
||||
elif [[ "$http_code" == "404" ]]; then
|
||||
msg_error "GitLab project not found (HTTP 404). Ensure '${source}' is correct and publicly accessible."
|
||||
rm -f /tmp/gl_check.json
|
||||
return 22
|
||||
elif [[ "$http_code" == "429" ]]; then
|
||||
msg_error "GitLab API rate limit exceeded (HTTP 429)."
|
||||
msg_error "To increase the limit, export a GitLab token: export GITLAB_TOKEN=\"glpat-your_token_here\""
|
||||
rm -f /tmp/gl_check.json
|
||||
return 22
|
||||
elif [[ "$http_code" == "000" || -z "$http_code" ]]; then
|
||||
msg_error "GitLab API connection failed (no response)."
|
||||
msg_error "Check your network/DNS: curl -sSL https://gitlab.com/api/v4/version"
|
||||
rm -f /tmp/gl_check.json
|
||||
return 7
|
||||
else
|
||||
msg_error "Unable to fetch releases for ${app} (HTTP ${http_code})"
|
||||
rm -f /tmp/gl_check.json
|
||||
return 22
|
||||
fi
|
||||
rm -f /tmp/gl_check.json
|
||||
fi
|
||||
|
||||
mapfile -t raw_tags < <(jq -r '.[] | .tag_name' <<<"$releases_json")
|
||||
if ((${#raw_tags[@]} == 0)); then
|
||||
msg_error "No releases found for ${app} on GitLab"
|
||||
return 250
|
||||
fi
|
||||
|
||||
local clean_tags=()
|
||||
for t in "${raw_tags[@]}"; do
|
||||
# Only strip leading 'v' when followed by a digit (e.g. v1.2.3)
|
||||
if [[ "$t" =~ ^v[0-9] ]]; then
|
||||
clean_tags+=("${t:1}")
|
||||
else
|
||||
clean_tags+=("$t")
|
||||
fi
|
||||
done
|
||||
|
||||
local latest_raw="${raw_tags[0]}"
|
||||
local latest_clean="${clean_tags[0]}"
|
||||
|
||||
# current installed (stored without v)
|
||||
local current=""
|
||||
if [[ -f "$current_file" ]]; then
|
||||
current="$(<"$current_file")"
|
||||
else
|
||||
# Migration: search for any /opt/*_version.txt
|
||||
local legacy_files
|
||||
mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null)
|
||||
if ((${#legacy_files[@]} == 1)); then
|
||||
current="$(<"${legacy_files[0]}")"
|
||||
echo "${current#v}" >"$current_file"
|
||||
rm -f "${legacy_files[0]}"
|
||||
fi
|
||||
fi
|
||||
if [[ "$current" =~ ^v[0-9] ]]; then
|
||||
current="${current:1}"
|
||||
fi
|
||||
|
||||
# Pinned version handling
|
||||
if [[ -n "$pinned_version_in" ]]; then
|
||||
local pin_clean
|
||||
if [[ "$pinned_version_in" =~ ^v[0-9] ]]; then
|
||||
pin_clean="${pinned_version_in:1}"
|
||||
else
|
||||
pin_clean="$pinned_version_in"
|
||||
fi
|
||||
local match_raw=""
|
||||
for i in "${!clean_tags[@]}"; do
|
||||
if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then
|
||||
match_raw="${raw_tags[$i]}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -z "$match_raw" ]]; then
|
||||
msg_error "Pinned version ${pinned_version_in} not found upstream"
|
||||
return 250
|
||||
fi
|
||||
|
||||
if [[ "$current" != "$pin_clean" ]]; then
|
||||
CHECK_UPDATE_RELEASE="$match_raw"
|
||||
msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ -n "$pin_reason" ]]; then
|
||||
msg_ok "No update available: ${app} (${current}) - update held back: ${pin_reason}"
|
||||
else
|
||||
msg_ok "No update available: ${app} (${current}) - update temporarily held back due to issues with newer releases"
|
||||
fi
|
||||
return 1
|
||||
fi
|
||||
|
||||
# No pinning → use latest
|
||||
if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
|
||||
CHECK_UPDATE_RELEASE="$latest_raw"
|
||||
msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
msg_ok "No update available: ${app} (${latest_clean})"
|
||||
return 1
|
||||
}
|
||||
|
||||
function fetch_and_deploy_gl_release() {
  # ---------------------------------------------------------------------------
  # Fetch a release from gitlab.com and deploy it to a target directory.
  #
  # Arguments:
  #   $1 - app name (falls back to the repo's last path segment when empty)
  #   $2 - GitLab project path, e.g. "group/project"
  #   $3 - mode: tarball|source|binary|prebuild|singlefile (default: tarball)
  #   $4 - release tag or "latest" (overridden by $var_appversion when set)
  #   $5 - target directory (default: /opt/<app>)
  #   $6 - asset filename glob (required for prebuild/singlefile modes)
  #
  # Reads:   GITLAB_TOKEN (optional auth), CLEAN_INSTALL, USE_ORIGINAL_FILENAME,
  #          DPKG_FORCE_CONFOLD / DPKG_FORCE_CONFNEW, var_appversion
  # Writes:  version marker file "$HOME/.<app_lc>" on successful deployment
  # Returns: 0 on success or when already up-to-date, 1 on any failure
  # ---------------------------------------------------------------------------
  local app="$1"
  local repo="$2"
  local mode="${3:-tarball}"
  local version="${var_appversion:-${4:-latest}}"
  local target="${5:-/opt/$app}"
  local asset_pattern="${6:-}"

  if [[ -z "$app" ]]; then
    app="${repo##*/}"
    if [[ -z "$app" ]]; then
      msg_error "fetch_and_deploy_gl_release requires app name or valid repo"
      return 1
    fi
  fi

  # Lowercase, space-free slug used for the version marker and tarball name.
  # (Pure parameter expansion — no subshell, no masked exit status.)
  local app_lc="${app,,}"
  app_lc="${app_lc// /}"
  local version_file="$HOME/.${app_lc}"

  # Deliberately unquoted when passed to curl so each option word splits.
  local api_timeout="--connect-timeout 10 --max-time 60"
  local download_timeout="--connect-timeout 15 --max-time 900"

  local current_version=""
  [[ -f "$version_file" ]] && current_version=$(<"$version_file")

  ensure_dependencies jq

  # GitLab's API addresses projects by URL-encoded path ("group%2Fproject").
  # NOTE: removed a stray leftover line here that duplicated this pipeline
  # with an unmatched ')' and broke the script.
  local repo_encoded
  repo_encoded=$(printf '%s' "$repo" | sed 's|/|%2F|g')

  local api_base="https://gitlab.com/api/v4/projects/$repo_encoded/releases"
  local api_url
  if [[ "$version" != "latest" ]]; then
    api_url="$api_base/$version"
  else
    api_url="$api_base?per_page=1&order_by=released_at&sort=desc"
  fi

  local header=()
  [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")

  # Query release metadata with retries; 429 gets exponential backoff.
  local max_retries=3 retry_delay=2 attempt=1 success=false http_code
  while ((attempt <= max_retries)); do
    http_code=$(curl $api_timeout -sSL -w "%{http_code}" -o /tmp/gl_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true
    if [[ "$http_code" == "200" ]]; then
      success=true
      break
    elif [[ "$http_code" == "429" ]]; then
      if ((attempt < max_retries)); then
        msg_warn "GitLab API rate limit hit, retrying in ${retry_delay}s... (attempt $attempt/$max_retries)"
        sleep "$retry_delay"
        retry_delay=$((retry_delay * 2))
      fi
    else
      sleep "$retry_delay"
    fi
    ((attempt++))
  done

  if ! $success; then
    # Map the final HTTP status to an actionable error message.
    if [[ "$http_code" == "401" ]]; then
      msg_error "GitLab API authentication failed (HTTP 401)."
      if [[ -n "${GITLAB_TOKEN:-}" ]]; then
        msg_error "Your GITLAB_TOKEN appears to be invalid or expired."
      else
        msg_error "The repository may require authentication. Try: export GITLAB_TOKEN=\"glpat-your_token\""
      fi
    elif [[ "$http_code" == "404" ]]; then
      msg_error "GitLab project or release not found (HTTP 404)."
      msg_error "Ensure '$repo' is correct and the project is accessible."
    elif [[ "$http_code" == "429" ]]; then
      msg_error "GitLab API rate limit exceeded (HTTP 429)."
      msg_error "To increase the limit, export a GitLab token before running the script:"
      msg_error "  export GITLAB_TOKEN=\"glpat-your_token_here\""
    elif [[ "$http_code" == "000" || -z "$http_code" ]]; then
      msg_error "GitLab API connection failed (no response)."
      msg_error "Check your network/DNS: curl -sSL https://gitlab.com/api/v4/version"
    else
      msg_error "Failed to fetch release metadata (HTTP $http_code)"
    fi
    return 1
  fi

  local json tag_name
  json=$(</tmp/gl_rel.json)

  # "latest" queries return an array; unwrap the newest element.
  if [[ "$version" == "latest" ]]; then
    json=$(echo "$json" | jq '.[0] // empty')
    if [[ -z "$json" || "$json" == "null" ]]; then
      msg_error "No releases found for $repo on GitLab"
      return 1
    fi
  fi

  tag_name=$(echo "$json" | jq -r '.tag_name // empty')
  if [[ -z "$tag_name" ]]; then
    msg_error "Could not determine tag name from release metadata"
    return 1
  fi
  # Normalize "v1.2.3" -> "1.2.3"; keep a slash-free variant for filenames.
  [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
  local version_safe="${version//\//-}"

  if [[ "$current_version" == "$version" ]]; then
    $STD msg_ok "$app is already up-to-date (v$version)"
    return 0
  fi

  local tmpdir
  tmpdir=$(mktemp -d) || return 1
  local filename=""

  msg_info "Fetching GitLab release: $app ($version)"

  # Helper: list downloadable asset URLs of a release (direct URL preferred).
  _gl_asset_urls() {
    local release_json="$1"
    echo "$release_json" | jq -r '
      (.assets.links // [])[] | .direct_asset_url // .url
    '
  }

  ### Tarball Mode ###
  if [[ "$mode" == "tarball" || "$mode" == "source" ]]; then
    local direct_tarball_url="https://gitlab.com/$repo/-/archive/$tag_name/${app_lc}-${version_safe}.tar.gz"
    filename="${app_lc}-${version_safe}.tar.gz"

    curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$direct_tarball_url" || {
      msg_error "Download failed: $direct_tarball_url"
      rm -rf "$tmpdir"
      return 1
    }

    mkdir -p "$target"
    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
      # ${target:?} guards against wiping "/" if target were empty.
      rm -rf "${target:?}/"*
    fi

    tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || {
      msg_error "Failed to extract tarball"
      rm -rf "$tmpdir"
      return 1
    }
    # GitLab archives unpack into a single top-level directory.
    local unpack_dir
    unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1)

    shopt -s dotglob nullglob
    cp -r "$unpack_dir"/* "$target/"
    shopt -u dotglob nullglob

  ### Binary Mode ###
  elif [[ "$mode" == "binary" ]]; then
    local arch
    arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
    [[ "$arch" == "x86_64" ]] && arch="amd64"
    [[ "$arch" == "aarch64" ]] && arch="arm64"

    local assets url_match=""
    assets=$(_gl_asset_urls "$json")

    # Selection order: explicit pattern -> arch-matching .deb -> any .deb.
    if [[ -n "$asset_pattern" ]]; then
      for u in $assets; do
        case "${u##*/}" in
        $asset_pattern)
          url_match="$u"
          break
          ;;
        esac
      done
    fi

    if [[ -z "$url_match" ]]; then
      for u in $assets; do
        if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
          url_match="$u"
          break
        fi
      done
    fi

    if [[ -z "$url_match" ]]; then
      for u in $assets; do
        [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
      done
    fi

    # Last resort: walk older releases for a usable asset.
    if [[ -z "$url_match" ]]; then
      local fallback_json
      if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "binary" "$asset_pattern" "$tag_name"); then
        json="$fallback_json"
        tag_name=$(echo "$json" | jq -r '.tag_name // empty')
        [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
        msg_info "Fetching GitLab release: $app ($version)"
        assets=$(_gl_asset_urls "$json")
        if [[ -n "$asset_pattern" ]]; then
          for u in $assets; do
            case "${u##*/}" in $asset_pattern)
              url_match="$u"
              break
              ;;
            esac
          done
        fi
        if [[ -z "$url_match" ]]; then
          for u in $assets; do
            [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]] && url_match="$u" && break
          done
        fi
        if [[ -z "$url_match" ]]; then
          for u in $assets; do
            [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
          done
        fi
      fi
    fi

    if [[ -z "$url_match" ]]; then
      msg_error "No suitable .deb asset found for $app"
      rm -rf "$tmpdir"
      return 1
    fi

    filename="${url_match##*/}"
    curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$url_match" || {
      msg_error "Download failed: $url_match"
      rm -rf "$tmpdir"
      return 1
    }

    chmod 644 "$tmpdir/$filename"
    local dpkg_opts=""
    [[ "${DPKG_FORCE_CONFOLD:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confold"
    [[ "${DPKG_FORCE_CONFNEW:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confnew"
    # Try apt first (resolves dependencies); fall back to raw dpkg.
    DEBIAN_FRONTEND=noninteractive SYSTEMD_OFFLINE=1 $STD apt install -y $dpkg_opts "$tmpdir/$filename" || {
      SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || {
        msg_error "Both apt and dpkg installation failed"
        rm -rf "$tmpdir"
        return 1
      }
    }

  ### Prebuild Mode ###
  elif [[ "$mode" == "prebuild" ]]; then
    # Strip accidental surrounding quotes from the pattern argument.
    local pattern="${6%\"}"
    pattern="${pattern#\"}"
    [[ -z "$pattern" ]] && {
      msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)"
      rm -rf "$tmpdir"
      return 1
    }

    local asset_url=""
    for u in $(_gl_asset_urls "$json"); do
      filename_candidate="${u##*/}"
      case "$filename_candidate" in
      $pattern)
        asset_url="$u"
        break
        ;;
      esac
    done

    # Fall back to older releases when the newest one lacks a matching asset.
    if [[ -z "$asset_url" ]]; then
      local fallback_json
      if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "prebuild" "$pattern" "$tag_name"); then
        json="$fallback_json"
        tag_name=$(echo "$json" | jq -r '.tag_name // empty')
        [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
        msg_info "Fetching GitLab release: $app ($version)"
        for u in $(_gl_asset_urls "$json"); do
          filename_candidate="${u##*/}"
          case "$filename_candidate" in $pattern)
            asset_url="$u"
            break
            ;;
          esac
        done
      fi
    fi

    [[ -z "$asset_url" ]] && {
      msg_error "No asset matching '$pattern' found"
      rm -rf "$tmpdir"
      return 1
    }

    filename="${asset_url##*/}"
    curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$asset_url" || {
      msg_error "Download failed: $asset_url"
      rm -rf "$tmpdir"
      return 1
    }

    local unpack_tmp
    unpack_tmp=$(mktemp -d)
    mkdir -p "$target"
    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
      rm -rf "${target:?}/"*
    fi

    if [[ "$filename" == *.zip ]]; then
      ensure_dependencies unzip
      unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || {
        msg_error "Failed to extract ZIP archive"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then
      tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
        msg_error "Failed to extract TAR archive"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    else
      msg_error "Unsupported archive format: $filename"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    fi

    # If the archive wraps everything in a single directory, strip that level.
    local top_entries inner_dir
    top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)
    if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
      inner_dir="$top_entries"
      shopt -s dotglob nullglob
      if compgen -G "$inner_dir/*" >/dev/null; then
        cp -r "$inner_dir"/* "$target/" || {
          msg_error "Failed to copy contents from $inner_dir to $target"
          rm -rf "$tmpdir" "$unpack_tmp"
          return 1
        }
      else
        msg_error "Inner directory is empty: $inner_dir"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      fi
      shopt -u dotglob nullglob
    else
      shopt -s dotglob nullglob
      if compgen -G "$unpack_tmp/*" >/dev/null; then
        cp -r "$unpack_tmp"/* "$target/" || {
          msg_error "Failed to copy contents to $target"
          rm -rf "$tmpdir" "$unpack_tmp"
          return 1
        }
      else
        msg_error "Unpacked archive is empty"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      fi
      shopt -u dotglob nullglob
    fi

  ### Singlefile Mode ###
  elif [[ "$mode" == "singlefile" ]]; then
    local pattern="${6%\"}"
    pattern="${pattern#\"}"
    [[ -z "$pattern" ]] && {
      msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)"
      rm -rf "$tmpdir"
      return 1
    }

    local asset_url=""
    for u in $(_gl_asset_urls "$json"); do
      filename_candidate="${u##*/}"
      case "$filename_candidate" in
      $pattern)
        asset_url="$u"
        break
        ;;
      esac
    done

    if [[ -z "$asset_url" ]]; then
      local fallback_json
      if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "singlefile" "$pattern" "$tag_name"); then
        json="$fallback_json"
        tag_name=$(echo "$json" | jq -r '.tag_name // empty')
        [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
        msg_info "Fetching GitLab release: $app ($version)"
        for u in $(_gl_asset_urls "$json"); do
          filename_candidate="${u##*/}"
          case "$filename_candidate" in $pattern)
            asset_url="$u"
            break
            ;;
          esac
        done
      fi
    fi

    [[ -z "$asset_url" ]] && {
      msg_error "No asset matching '$pattern' found"
      rm -rf "$tmpdir"
      return 1
    }

    filename="${asset_url##*/}"
    mkdir -p "$target"

    # Download directly into the target; name after the app unless the
    # caller asked to keep the original asset filename.
    local use_filename="${USE_ORIGINAL_FILENAME:-false}"
    local target_file="$app"
    [[ "$use_filename" == "true" ]] && target_file="$filename"

    curl $download_timeout -fsSL "${header[@]}" -o "$target/$target_file" "$asset_url" || {
      msg_error "Download failed: $asset_url"
      rm -rf "$tmpdir"
      return 1
    }

    # JARs stay non-executable; anything else is assumed to be a binary.
    if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then
      chmod +x "$target/$target_file"
    fi

  else
    msg_error "Unknown mode: $mode"
    rm -rf "$tmpdir"
    return 1
  fi

  echo "$version" >"$version_file"
  msg_ok "Deployed: $app ($version)"
  rm -rf "$tmpdir"
}
msg_error "GitLab API connection failed (no response)."
|
||||
msg_error "Check your network/DNS: curl -sSL https://gitlab.com/api/v4/version"
|
||||
else
|
||||
msg_error "Failed to fetch release metadata (HTTP $http_code)"
|
||||
fi
|
||||
return 1
|
||||
fi
|
||||
|
||||
local json tag_name
|
||||
json=$(</tmp/gl_rel.json)
|
||||
|
||||
if [[ "$version" == "latest" ]]; then
|
||||
json=$(echo "$json" | jq '.[0] // empty')
|
||||
if [[ -z "$json" || "$json" == "null" ]]; then
|
||||
msg_error "No releases found for $repo on GitLab"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
tag_name=$(echo "$json" | jq -r '.tag_name // empty')
|
||||
if [[ -z "$tag_name" ]]; then
|
||||
msg_error "Could not determine tag name from release metadata"
|
||||
return 1
|
||||
fi
|
||||
[[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
|
||||
local version_safe="${version//\//-}"
|
||||
|
||||
if [[ "$current_version" == "$version" ]]; then
|
||||
$STD msg_ok "$app is already up-to-date (v$version)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local tmpdir
|
||||
tmpdir=$(mktemp -d) || return 1
|
||||
local filename=""
|
||||
|
||||
msg_info "Fetching GitLab release: $app ($version)"
|
||||
|
||||
_gl_asset_urls() {
|
||||
local release_json="$1"
|
||||
echo "$release_json" | jq -r '
|
||||
(.assets.links // [])[] | .direct_asset_url // .url
|
||||
'
|
||||
}
|
||||
|
||||
### Tarball Mode ###
|
||||
if [[ "$mode" == "tarball" || "$mode" == "source" ]]; then
|
||||
local direct_tarball_url="https://gitlab.com/$repo/-/archive/$tag_name/${app_lc}-${version_safe}.tar.gz"
|
||||
filename="${app_lc}-${version_safe}.tar.gz"
|
||||
|
||||
curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$direct_tarball_url" || {
|
||||
msg_error "Download failed: $direct_tarball_url"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
mkdir -p "$target"
|
||||
if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
|
||||
rm -rf "${target:?}/"*
|
||||
fi
|
||||
|
||||
tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || {
|
||||
msg_error "Failed to extract tarball"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
local unpack_dir
|
||||
unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1)
|
||||
|
||||
shopt -s dotglob nullglob
|
||||
cp -r "$unpack_dir"/* "$target/"
|
||||
shopt -u dotglob nullglob
|
||||
|
||||
### Binary Mode ###
|
||||
elif [[ "$mode" == "binary" ]]; then
|
||||
local arch
|
||||
arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
|
||||
[[ "$arch" == "x86_64" ]] && arch="amd64"
|
||||
[[ "$arch" == "aarch64" ]] && arch="arm64"
|
||||
|
||||
local assets url_match=""
|
||||
assets=$(_gl_asset_urls "$json")
|
||||
|
||||
if [[ -n "$asset_pattern" ]]; then
|
||||
for u in $assets; do
|
||||
case "${u##*/}" in
|
||||
$asset_pattern)
|
||||
url_match="$u"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -z "$url_match" ]]; then
|
||||
for u in $assets; do
|
||||
if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
|
||||
url_match="$u"
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -z "$url_match" ]]; then
|
||||
for u in $assets; do
|
||||
[[ "$u" =~ \.deb$ ]] && url_match="$u" && break
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -z "$url_match" ]]; then
|
||||
local fallback_json
|
||||
if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "binary" "$asset_pattern" "$tag_name"); then
|
||||
json="$fallback_json"
|
||||
tag_name=$(echo "$json" | jq -r '.tag_name // empty')
|
||||
[[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
|
||||
msg_info "Fetching GitLab release: $app ($version)"
|
||||
assets=$(_gl_asset_urls "$json")
|
||||
if [[ -n "$asset_pattern" ]]; then
|
||||
for u in $assets; do
|
||||
case "${u##*/}" in $asset_pattern)
|
||||
url_match="$u"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
fi
|
||||
if [[ -z "$url_match" ]]; then
|
||||
for u in $assets; do
|
||||
[[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]] && url_match="$u" && break
|
||||
done
|
||||
fi
|
||||
if [[ -z "$url_match" ]]; then
|
||||
for u in $assets; do
|
||||
[[ "$u" =~ \.deb$ ]] && url_match="$u" && break
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -z "$url_match" ]]; then
|
||||
msg_error "No suitable .deb asset found for $app"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
filename="${url_match##*/}"
|
||||
curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$url_match" || {
|
||||
msg_error "Download failed: $url_match"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
chmod 644 "$tmpdir/$filename"
|
||||
local dpkg_opts=""
|
||||
[[ "${DPKG_FORCE_CONFOLD:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confold"
|
||||
[[ "${DPKG_FORCE_CONFNEW:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confnew"
|
||||
DEBIAN_FRONTEND=noninteractive SYSTEMD_OFFLINE=1 $STD apt install -y $dpkg_opts "$tmpdir/$filename" || {
|
||||
SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || {
|
||||
msg_error "Both apt and dpkg installation failed"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
### Prebuild Mode ###
|
||||
elif [[ "$mode" == "prebuild" ]]; then
|
||||
local pattern="${6%\"}"
|
||||
pattern="${pattern#\"}"
|
||||
[[ -z "$pattern" ]] && {
|
||||
msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
local asset_url=""
|
||||
for u in $(_gl_asset_urls "$json"); do
|
||||
filename_candidate="${u##*/}"
|
||||
case "$filename_candidate" in
|
||||
$pattern)
|
||||
asset_url="$u"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z "$asset_url" ]]; then
|
||||
local fallback_json
|
||||
if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "prebuild" "$pattern" "$tag_name"); then
|
||||
json="$fallback_json"
|
||||
tag_name=$(echo "$json" | jq -r '.tag_name // empty')
|
||||
[[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
|
||||
msg_info "Fetching GitLab release: $app ($version)"
|
||||
for u in $(_gl_asset_urls "$json"); do
|
||||
filename_candidate="${u##*/}"
|
||||
case "$filename_candidate" in $pattern)
|
||||
asset_url="$u"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
[[ -z "$asset_url" ]] && {
|
||||
msg_error "No asset matching '$pattern' found"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
filename="${asset_url##*/}"
|
||||
curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$asset_url" || {
|
||||
msg_error "Download failed: $asset_url"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
local unpack_tmp
|
||||
unpack_tmp=$(mktemp -d)
|
||||
mkdir -p "$target"
|
||||
if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
|
||||
rm -rf "${target:?}/"*
|
||||
fi
|
||||
|
||||
if [[ "$filename" == *.zip ]]; then
|
||||
ensure_dependencies unzip
|
||||
unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || {
|
||||
msg_error "Failed to extract ZIP archive"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
}
|
||||
elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then
|
||||
tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
|
||||
msg_error "Failed to extract TAR archive"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
}
|
||||
else
|
||||
msg_error "Unsupported archive format: $filename"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local top_entries inner_dir
|
||||
top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)
|
||||
if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
|
||||
inner_dir="$top_entries"
|
||||
shopt -s dotglob nullglob
|
||||
if compgen -G "$inner_dir/*" >/dev/null; then
|
||||
cp -r "$inner_dir"/* "$target/" || {
|
||||
msg_error "Failed to copy contents from $inner_dir to $target"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
}
|
||||
else
|
||||
msg_error "Inner directory is empty: $inner_dir"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
fi
|
||||
shopt -u dotglob nullglob
|
||||
else
|
||||
shopt -s dotglob nullglob
|
||||
if compgen -G "$unpack_tmp/*" >/dev/null; then
|
||||
cp -r "$unpack_tmp"/* "$target/" || {
|
||||
msg_error "Failed to copy contents to $target"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
}
|
||||
else
|
||||
msg_error "Unpacked archive is empty"
|
||||
rm -rf "$tmpdir" "$unpack_tmp"
|
||||
return 1
|
||||
fi
|
||||
shopt -u dotglob nullglob
|
||||
fi
|
||||
|
||||
### Singlefile Mode ###
|
||||
elif [[ "$mode" == "singlefile" ]]; then
|
||||
local pattern="${6%\"}"
|
||||
pattern="${pattern#\"}"
|
||||
[[ -z "$pattern" ]] && {
|
||||
msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
local asset_url=""
|
||||
for u in $(_gl_asset_urls "$json"); do
|
||||
filename_candidate="${u##*/}"
|
||||
case "$filename_candidate" in
|
||||
$pattern)
|
||||
asset_url="$u"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z "$asset_url" ]]; then
|
||||
local fallback_json
|
||||
if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "singlefile" "$pattern" "$tag_name"); then
|
||||
json="$fallback_json"
|
||||
tag_name=$(echo "$json" | jq -r '.tag_name // empty')
|
||||
[[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
|
||||
msg_info "Fetching GitLab release: $app ($version)"
|
||||
for u in $(_gl_asset_urls "$json"); do
|
||||
filename_candidate="${u##*/}"
|
||||
case "$filename_candidate" in $pattern)
|
||||
asset_url="$u"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
[[ -z "$asset_url" ]] && {
|
||||
msg_error "No asset matching '$pattern' found"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
filename="${asset_url##*/}"
|
||||
mkdir -p "$target"
|
||||
|
||||
local use_filename="${USE_ORIGINAL_FILENAME:-false}"
|
||||
local target_file="$app"
|
||||
[[ "$use_filename" == "true" ]] && target_file="$filename"
|
||||
|
||||
curl $download_timeout -fsSL "${header[@]}" -o "$target/$target_file" "$asset_url" || {
|
||||
msg_error "Download failed: $asset_url"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
}
|
||||
|
||||
if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then
|
||||
chmod +x "$target/$target_file"
|
||||
fi
|
||||
|
||||
else
|
||||
msg_error "Unknown mode: $mode"
|
||||
rm -rf "$tmpdir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "$version" >"$version_file"
|
||||
msg_ok "Deployed: $app ($version)"
|
||||
rm -rf "$tmpdir"
|
||||
}
|
||||
|
||||
@@ -151,23 +151,6 @@ function check_proxmox_host() {
|
||||
# ==============================================================================
|
||||
# CHECK / INSTALL DOCKER
|
||||
# ==============================================================================
|
||||
function ensure_openssl() {
  # Guarantee that the `openssl` binary is present, installing it when absent.
  # Alpine uses apk; Debian-family hosts use apt-get; anything else is fatal.
  if ! command -v openssl &>/dev/null; then
    msg_info "Installing openssl"
    if [[ -f /etc/alpine-release ]]; then
      $STD apk add openssl
    elif command -v apt-get &>/dev/null; then
      $STD apt-get update
      $STD apt-get install -y openssl
    else
      msg_error "openssl is required but could not be installed automatically."
      exit 10
    fi
    msg_ok "Installed openssl"
  fi
}
|
||||
|
||||
function check_or_install_docker() {
|
||||
if command -v docker &>/dev/null; then
|
||||
msg_ok "Docker $(docker --version | cut -d' ' -f3 | tr -d ',') is available"
|
||||
@@ -177,7 +160,6 @@ function check_or_install_docker() {
|
||||
msg_error "Docker Compose plugin is not available. Please install it."
|
||||
exit 10
|
||||
fi
|
||||
ensure_openssl
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -201,8 +183,6 @@ function check_or_install_docker() {
|
||||
$STD sh <(curl -fsSL https://get.docker.com)
|
||||
fi
|
||||
msg_ok "Installed Docker"
|
||||
|
||||
ensure_openssl
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
|
||||
@@ -1,436 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# ============================================================================
|
||||
# Community-Scripts ProxmoxVE — Post-Install Hook: Example Library
|
||||
# ----------------------------------------------------------------------------
|
||||
# This file is NOT meant to be executed as-is.
|
||||
# It is a collection of complete, copy-pasteable example hooks for the
|
||||
# optional `var_post_install` feature in build.func.
|
||||
#
|
||||
# HOW IT WORKS
|
||||
# ------------
|
||||
# In the ct/*.sh CT scripts (or via Advanced Settings → Step 28) you can
|
||||
# point `var_post_install` to an absolute path on the Proxmox HOST, e.g.:
|
||||
#
|
||||
# # in /root/.community-scripts/default.vars
|
||||
# var_post_install=/opt/community-scripts/hooks/notify.sh
|
||||
#
|
||||
# # OR per-app, in app.vars
|
||||
# var_post_install=/opt/community-scripts/hooks/vaultwarden-postprovision.sh
|
||||
#
|
||||
# # OR interactively in the Advanced Settings whiptail (Step 28).
|
||||
#
|
||||
# The hook runs ON THE PROXMOX HOST (NOT inside the LXC) as root,
|
||||
# AFTER the container is fully provisioned, started and the description
|
||||
# is set. stdout/stderr is captured to:
|
||||
#
|
||||
# /var/log/community-scripts/post-install-<CTID>.log
|
||||
#
|
||||
# AVAILABLE ENV VARIABLES
|
||||
# -----------------------
|
||||
# APP - Pretty name (e.g. "Vaultwarden")
|
||||
# NSAPP - Slug / lowercase (e.g. "vaultwarden")
|
||||
# CTID - Numeric container ID (e.g. "103")
|
||||
# IP - IPv4 address of the LXC (e.g. "192.168.1.50")
|
||||
# HN - Hostname (e.g. "vaultwarden")
|
||||
# STORAGE - Storage where the rootfs lives (e.g. "local-lvm")
|
||||
# BRG - Bridge (e.g. "vmbr0")
|
||||
#
|
||||
# GENERAL TIPS
|
||||
# ------------
|
||||
# - Use `set -euo pipefail` so failures actually surface.
|
||||
# - Use `|| true` on best-effort steps you do not want to abort the hook.
|
||||
# - The file just needs to be a valid script. `+x` is optional — it is
|
||||
# invoked via `bash <path>`. Shebang is honored only if you call it
|
||||
# yourself; otherwise the shebang line is purely cosmetic.
|
||||
# - If the hook exits non-zero, the user gets a whiptail popup with the
|
||||
# last 15 log lines. The LXC creation itself is NOT rolled back.
|
||||
# - Keep hooks idempotent — they may be re-run if you recreate a CT.
|
||||
#
|
||||
# HOW TO USE THIS FILE
|
||||
# --------------------
|
||||
# 1. Copy ONE example block (between the BEGIN/END markers) into a new
|
||||
# file on the Proxmox host, e.g. /opt/community-scripts/hooks/notify.sh
|
||||
# 2. chmod +x /opt/community-scripts/hooks/notify.sh (optional)
|
||||
# 3. Set var_post_install in default.vars / app.vars or pick the path
|
||||
# in Advanced Settings.
|
||||
# ============================================================================
|
||||
|
||||
# ============================================================================
|
||||
# ▼▼▼ EXAMPLE 1 — BEGIN ▼▼▼
|
||||
# ----------------------------------------------------------------------------
|
||||
# Name : minimal-logger.sh
|
||||
# Purpose : Append every newly created LXC to a single CSV-ish log.
|
||||
# Difficulty : ⭐ Beginner
|
||||
# Side effects: Writes to /var/log/community-scripts/created-lxcs.log
|
||||
# Use case : You just want a paper trail of "what got created when".
|
||||
# ============================================================================
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Append one record per newly created LXC to a semicolon-separated log.
readonly LOG_DIR="/var/log/community-scripts"
readonly LOG_FILE="${LOG_DIR}/created-lxcs.log"

mkdir -p "$LOG_DIR"

# Emit the header exactly once — only when the file is missing or empty.
[[ -s "$LOG_FILE" ]] || echo "timestamp;ctid;app;hostname;ip;bridge;storage" >"$LOG_FILE"

# One line per container: ISO timestamp followed by the hook environment vars.
printf '%s;%s;%s;%s;%s;%s;%s\n' \
  "$(date -Iseconds)" \
  "${CTID}" \
  "${APP}" \
  "${HN}" \
  "${IP}" \
  "${BRG}" \
  "${STORAGE}" \
  >>"$LOG_FILE"

echo "Logged ${APP} (CTID=${CTID}) to ${LOG_FILE}"
|
||||
# ▲▲▲ EXAMPLE 1 — END ▲▲▲
|
||||
|
||||
# ============================================================================
|
||||
# ▼▼▼ EXAMPLE 2 — BEGIN ▼▼▼
|
||||
# ----------------------------------------------------------------------------
|
||||
# Name : discord-gotify-notify.sh
|
||||
# Purpose : Send a rich Discord embed AND a Gotify push notification
|
||||
# whenever a new LXC is provisioned.
|
||||
# Difficulty : ⭐⭐ Intermediate
|
||||
# Requires : curl on the host (default), reachable webhook URLs.
|
||||
# Side effects: Outbound HTTPS to Discord + your Gotify server.
|
||||
# ============================================================================
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Post-install hook: announce a freshly provisioned LXC via a Discord
# webhook embed and a Gotify push. Both sends are best-effort (non-fatal).

# --- CONFIG (edit me) -------------------------------------------------------
DISCORD_WEBHOOK="https://discord.com/api/webhooks/XXXXXXXX/YYYYYYYY"
GOTIFY_URL="https://gotify.example.com"
GOTIFY_TOKEN="AbCdEfGhIjKlMnO"
GOTIFY_PRIORITY=5
# ----------------------------------------------------------------------------

# Resolve the Proxmox node's hostname for context
NODE="$(hostname -s)"
TS="$(date -Iseconds)"

# --- Discord embed ----------------------------------------------------------
# `read -d ''` returns non-zero at EOF, so `|| true` keeps set -e happy;
# the unquoted JSON delimiter lets ${APP}/${CTID}/… expand inside the body.
read -r -d '' DISCORD_PAYLOAD <<JSON || true
{
  "username": "Proxmox - ${NODE}",
  "avatar_url": "https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png",
  "embeds": [{
    "title": "✅ ${APP} LXC created",
    "description": "A new community-script LXC has been provisioned on **${NODE}**.",
    "color": 3066993,
    "timestamp": "${TS}",
    "fields": [
      {"name": "CTID", "value": "${CTID}", "inline": true},
      {"name": "Hostname", "value": "${HN}", "inline": true},
      {"name": "App", "value": "${APP}", "inline": true},
      {"name": "IP", "value": "${IP}", "inline": true},
      {"name": "Bridge", "value": "${BRG}", "inline": true},
      {"name": "Storage", "value": "${STORAGE}", "inline": true}
    ],
    "footer": {"text": "community-scripts.org"}
  }]
}
JSON

curl -fsS --max-time 10 \
  -H "Content-Type: application/json" \
  -X POST "$DISCORD_WEBHOOK" \
  --data "$DISCORD_PAYLOAD" \
  >/dev/null ||
  echo "WARN: Discord webhook failed (non-fatal)"

# --- Gotify push ------------------------------------------------------------
curl -fsS --max-time 10 \
  -H "X-Gotify-Key: ${GOTIFY_TOKEN}" \
  -F "title=Proxmox: ${APP} LXC created" \
  -F "message=CTID=${CTID} IP=${IP} HN=${HN} on ${NODE}" \
  -F "priority=${GOTIFY_PRIORITY}" \
  "${GOTIFY_URL}/message" \
  >/dev/null ||
  echo "WARN: Gotify push failed (non-fatal)"

echo "Notifications dispatched for CTID=${CTID}"
|
||||
# ▲▲▲ EXAMPLE 2 — END ▲▲▲
|
||||
|
||||
# ============================================================================
|
||||
# ▼▼▼ EXAMPLE 3 — BEGIN ▼▼▼
|
||||
# ----------------------------------------------------------------------------
|
||||
# Name : auto-pool-tags-backup.sh
|
||||
# Purpose : Add the new LXC to a Proxmox pool, append cluster-wide tags,
|
||||
# register a DNS record in pi-hole, and trigger an immediate
|
||||
# snapshot backup to a configured storage.
|
||||
# Difficulty : ⭐⭐⭐ Advanced
|
||||
# Requires : pvesh, pct, vzdump (host-side; available by default on PVE),
|
||||
# a reachable pi-hole admin API.
|
||||
# ============================================================================
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Post-install hook: attach the new LXC to a Proxmox pool, merge tags,
# register its DNS record in pi-hole, and trigger an initial backup.
# All steps after pool creation are best-effort (warn, don't abort).

# --- CONFIG (edit me) -------------------------------------------------------
TARGET_POOL="auto-lxc"
EXTRA_TAGS=("auto-provisioned" "${NSAPP}") # community-script tag is set by build.func
BACKUP_STORAGE="pbs-main" # set to "" to skip initial backup
PIHOLE_HOST="192.168.1.5"
PIHOLE_PASSWORD="changeme" # web-UI password
DNS_DOMAIN="lan" # FQDN will be ${HN}.${DNS_DOMAIN}
# ----------------------------------------------------------------------------

# 1) Ensure the pool exists, then attach the CT
if ! pvesh get "/pools/${TARGET_POOL}" >/dev/null 2>&1; then
  echo "Creating pool: ${TARGET_POOL}"
  pvesh create /pools --poolid "${TARGET_POOL}" --comment "Auto-created by post-install hook" || true
fi
echo "Adding CTID=${CTID} to pool=${TARGET_POOL}"
pvesh set "/pools/${TARGET_POOL}" --vms "${CTID}" || echo "WARN: pool attach failed (non-fatal)"

# 2) Merge new tags with existing ones (preserve community-script etc.)
# Tags are semicolon-separated in `pct config`; an associative array is
# used as a set to deduplicate existing + extra tags.
CURRENT_TAGS="$(pct config "${CTID}" | awk -F': ' '/^tags:/{print $2}')"
declare -A TAG_SET
IFS=';' read -r -a CUR_ARR <<<"${CURRENT_TAGS:-}"
for t in "${CUR_ARR[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done
for t in "${EXTRA_TAGS[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done
# The $( ) subshell scopes IFS=';' so "${!TAG_SET[*]}" joins keys with ';'.
NEW_TAGS="$(
  IFS=';'
  echo "${!TAG_SET[*]}"
)"
echo "Setting tags: ${NEW_TAGS}"
pct set "${CTID}" --tags "${NEW_TAGS}" || echo "WARN: tag update failed (non-fatal)"

# 3) Register DNS in pi-hole (custom DNS record)
# Auth first to obtain a session id (SID), then PUT the host entry,
# then explicitly DELETE the session to log out.
FQDN="${HN}.${DNS_DOMAIN}"
echo "Registering DNS: ${FQDN} → ${IP} on pi-hole ${PIHOLE_HOST}"
SID="$(curl -fsS --max-time 5 \
  -d "pw=${PIHOLE_PASSWORD}" \
  "http://${PIHOLE_HOST}/api/auth" 2>/dev/null |
  sed -nE 's/.*"sid":"([^"]+)".*/\1/p' || true)"

if [[ -n "${SID}" ]]; then
  curl -fsS --max-time 5 -X PUT \
    -H "Content-Type: application/json" \
    -H "sid: ${SID}" \
    -d "{\"hosts\":[\"${IP} ${FQDN}\"]}" \
    "http://${PIHOLE_HOST}/api/config/dns/hosts" >/dev/null ||
    echo "WARN: pi-hole DNS update failed (non-fatal)"
  curl -fsS --max-time 5 -X DELETE -H "sid: ${SID}" "http://${PIHOLE_HOST}/api/auth" >/dev/null || true
else
  echo "WARN: could not obtain pi-hole session (skipping DNS)"
fi

# 4) Initial backup (best-effort, can take a few minutes)
if [[ -n "${BACKUP_STORAGE}" ]]; then
  if pvesh get "/storage/${BACKUP_STORAGE}" >/dev/null 2>&1; then
    echo "Triggering initial backup of CTID=${CTID} to ${BACKUP_STORAGE}"
    vzdump "${CTID}" \
      --storage "${BACKUP_STORAGE}" \
      --mode snapshot \
      --compress zstd \
      --notes-template "Initial backup of ${APP} (CTID=${CTID})" \
      --notification-mode auto ||
      echo "WARN: initial backup failed (non-fatal)"
  else
    echo "Backup storage '${BACKUP_STORAGE}' not found — skipping."
  fi
fi

echo "Post-provision routine complete for ${APP} (CTID=${CTID})"
|
||||
# ▲▲▲ EXAMPLE 3 — END ▲▲▲
|
||||
|
||||
# ============================================================================
|
||||
# ▼▼▼ EXAMPLE 4 — BEGIN ▼▼▼
|
||||
# ----------------------------------------------------------------------------
|
||||
# Name : inject-ssh-and-monitoring.sh
|
||||
# Purpose : Push the host's admin SSH key into the new LXC, install the
|
||||
# Beszel monitoring agent inside the container, and register
|
||||
# an Uptime-Kuma HTTP push monitor for the LXC's IP.
|
||||
# Difficulty : ⭐⭐⭐ Advanced
|
||||
# Requires : pct (host), curl (inside LXC), reachable Beszel hub +
|
||||
# Uptime-Kuma push URL.
|
||||
# ============================================================================
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Post-install hook: push the host admin SSH key into the new LXC, install
# the Beszel monitoring agent inside it, and ping an Uptime-Kuma push URL.

# --- CONFIG (edit me) -------------------------------------------------------
ADMIN_KEY="/root/.ssh/admin_ed25519.pub"
BESZEL_HUB_URL="http://192.168.1.10:8090"
BESZEL_AGENT_KEY="ssh-ed25519 AAAA... beszel@hub" # public key of the hub
UPTIME_KUMA_PUSH_BASE="http://uptime.lan/api/push/abc123"
# ----------------------------------------------------------------------------

# 1) Inject the admin SSH key
if [[ -f "${ADMIN_KEY}" ]]; then
  echo "Pushing admin SSH key into CTID=${CTID}"
  pct exec "${CTID}" -- mkdir -p /root/.ssh
  pct exec "${CTID}" -- chmod 700 /root/.ssh
  pct push "${CTID}" "${ADMIN_KEY}" /root/.ssh/authorized_keys
  pct exec "${CTID}" -- chmod 600 /root/.ssh/authorized_keys
else
  echo "WARN: ${ADMIN_KEY} not found on host — skipping SSH key injection"
fi

# 2) Wait for outbound networking inside the CT (max 30 s)
# Polls DNS resolution inside the container once per second; falls through
# after 30 attempts without failing the hook.
echo "Waiting for network inside CTID=${CTID}…"
for _ in $(seq 1 30); do
  if pct exec "${CTID}" -- bash -c 'getent hosts deb.debian.org >/dev/null 2>&1'; then
    break
  fi
  sleep 1
done

# 3) Install Beszel agent inside the LXC
# The quoted 'AGENT_INSTALL' delimiter keeps the heredoc literal: nothing is
# expanded on the host, so the inner <<UNIT heredoc and placeholders survive
# intact and are interpreted by bash inside the container.
echo "Installing Beszel agent inside CTID=${CTID}"
pct exec "${CTID}" -- bash -s <<'AGENT_INSTALL' || echo "WARN: Beszel install failed"
set -euo pipefail
ARCH="$(uname -m)"
case "$ARCH" in
  x86_64) ARCH_TAG=amd64 ;;
  aarch64) ARCH_TAG=arm64 ;;
  *) echo "Unsupported arch: $ARCH"; exit 1 ;;
esac
TMP=$(mktemp -d)
cd "$TMP"
curl -fsSL "https://github.com/henrygd/beszel/releases/latest/download/beszel-agent_linux_${ARCH_TAG}.tar.gz" \
  | tar -xz
install -m 0755 beszel-agent /usr/local/bin/beszel-agent

cat >/etc/systemd/system/beszel-agent.service <<UNIT
[Unit]
Description=Beszel Agent
After=network-online.target
Wants=network-online.target
[Service]
Environment="PORT=45876"
Environment="KEY=__KEY_PLACEHOLDER__"
ExecStart=/usr/local/bin/beszel-agent
Restart=always
[Install]
WantedBy=multi-user.target
UNIT
AGENT_INSTALL

# Inject the configured public key into the unit file (avoids quoting hell)
pct exec "${CTID}" -- sed -i "s|__KEY_PLACEHOLDER__|${BESZEL_AGENT_KEY}|" \
  /etc/systemd/system/beszel-agent.service

pct exec "${CTID}" -- systemctl daemon-reload
pct exec "${CTID}" -- systemctl enable --now beszel-agent.service ||
  echo "WARN: could not start beszel-agent"

# 4) Register an Uptime-Kuma push monitor (host-side, just sends one ping)
echo "Pinging Uptime-Kuma push monitor for ${HN}"
curl -fsS --max-time 5 \
  --get \
  --data-urlencode "status=up" \
  --data-urlencode "msg=created by community-scripts" \
  --data-urlencode "ping=1" \
  --data-urlencode "label=${HN}" \
  "${UPTIME_KUMA_PUSH_BASE}" >/dev/null ||
  echo "WARN: Uptime-Kuma push failed (non-fatal)"

echo "Provisioned monitoring for ${APP} (CTID=${CTID}, IP=${IP})"
|
||||
# ▲▲▲ EXAMPLE 4 — END ▲▲▲
|
||||
|
||||
# ============================================================================
|
||||
# ▼▼▼ EXAMPLE 5 — BEGIN ▼▼▼
|
||||
# ----------------------------------------------------------------------------
|
||||
# Name : per-app-router.sh
|
||||
# Purpose : Single dispatcher hook that runs different actions
|
||||
# depending on the app being installed (NSAPP). Useful when
|
||||
# you want ONE hook for the whole cluster but distinct
|
||||
# behavior for, e.g., databases vs media services.
|
||||
# Difficulty : ⭐⭐⭐ Advanced
|
||||
# ============================================================================
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# --- CONFIG (edit me) -------------------------------------------------------
|
||||
DEFAULT_DNS_SUFFIX="lan"
|
||||
PROM_FILE_SD_DIR="/etc/prometheus/file_sd" # on the host that runs Prometheus
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
log() { printf '[%s] %s\n' "$(date +%H:%M:%S)" "$*"; }
|
||||
|
||||
# ---------- shared helpers --------------------------------------------------

#######################################
# Add (or refresh) a Prometheus file_sd scrape target.
# Globals:   PROM_FILE_SD_DIR (read), IP, HN, NSAPP (read), log (called)
# Arguments: $1 - job name (becomes <job>.json), $2 - scrape port
# Outputs:   rewrites ${PROM_FILE_SD_DIR}/<job>.json in place
#######################################
register_prometheus_target() {
  local job="$1" port="$2"
  local file="${PROM_FILE_SD_DIR}/${job}.json"
  mkdir -p "${PROM_FILE_SD_DIR}"
  if [[ ! -f "$file" ]]; then echo "[]" >"$file"; fi
  python3 - "$file" "${IP}:${port}" "${HN}" "${NSAPP}" <<'PY'
import json, sys
path, target, hn, app = sys.argv[1:5]
with open(path) as fh:
    data = json.load(fh)
# Avoid duplicates: strip only OUR target from existing blocks (a block may
# legitimately carry other targets — dropping the whole block would lose
# them), then prune blocks left empty and append a fresh entry.
for block in data:
    block["targets"] = [t for t in block.get("targets", []) if t != target]
data = [b for b in data if b.get("targets")]
data.append({"targets": [target], "labels": {"hostname": hn, "app": app}})
with open(path, "w") as fh:
    json.dump(data, fh, indent=2)
    fh.write("\n")  # POSIX-friendly trailing newline
PY
  log "Registered Prometheus target ${IP}:${port} in ${file}"
}
# set_ct_options CORES MEMORY_MB DESCRIPTION
# Best-effort resize + relabel of container ${CTID}; pct failures are
# deliberately ignored so provisioning never aborts here.
set_ct_options() {
  local core_count="$1"
  local memory_mb="$2"
  local description="$3"
  pct set "${CTID}" --cores "${core_count}" --memory "${memory_mb}" || true
  pct set "${CTID}" --description "${description}" || true
}
|
||||
# ---------- per-app dispatch ------------------------------------------------
log "Dispatching post-install for NSAPP=${NSAPP} CTID=${CTID}"

case "${NSAPP}" in

  # ------ Databases ---------------------------------------------------------
  postgresql | mariadb | mongodb | redis | valkey)
    log "Database role: bumping resources & adding to backup-critical pool"
    set_ct_options 4 4096 "DB: ${APP}"
    # Pool membership is best-effort: the pool may not exist on every node.
    pvesh set /pools/db-critical --vms "${CTID}" 2>/dev/null || true
    register_prometheus_target "${NSAPP}-exporter" 9187
    ;;

  # ------ *arr media stack --------------------------------------------------
  sonarr | radarr | prowlarr | lidarr | readarr | bazarr)
    log "Media-arr role: tagging + Sonarr/Radarr API webhook"
    pct set "${CTID}" --tags "community-script;media;arr-stack" || true
    arr_payload="{\"app\":\"${NSAPP}\",\"ctid\":${CTID},\"ip\":\"${IP}\"}"
    curl -fsS --max-time 5 -X POST \
      -H "Content-Type: application/json" \
      -d "${arr_payload}" \
      "http://media-hub.${DEFAULT_DNS_SUFFIX}/hooks/arr-added" \
      >/dev/null || log "WARN: media-hub webhook failed"
    ;;

  # ------ Web apps that should sit behind NPM/Traefik ----------------------
  vaultwarden | paperless-ngx | nextcloud | immich | bookstack)
    log "Web app role: registering reverse-proxy entry"
    proxy_payload=$(
      cat <<JSON
{
  "name": "${HN}",
  "host": "${HN}.${DEFAULT_DNS_SUFFIX}",
  "backend": "http://${IP}",
  "app": "${NSAPP}"
}
JSON
    )
    curl -fsS --max-time 5 -X POST \
      -H "Content-Type: application/json" \
      -d "${proxy_payload}" \
      "http://traefik.${DEFAULT_DNS_SUFFIX}/api/dynamic-add" \
      >/dev/null || log "WARN: traefik registration failed"
    register_prometheus_target "blackbox-http" 80
    ;;

  # ------ Default fallback --------------------------------------------------
  *)
    log "No special handling for ${NSAPP} — applying generic defaults"
    register_prometheus_target "node-exporter" 9100
    ;;
esac

log "Finished dispatcher for ${APP} (CTID=${CTID})"
# ▲▲▲ EXAMPLE 5 — END ▲▲▲
|
||||
|
||||
# ============================================================================
|
||||
# END OF EXAMPLES
|
||||
# ============================================================================
|
||||
Reference in New Issue
Block a user