Compare commits

..

44 Commits

Author SHA1 Message Date
community-scripts-pr-app[bot]
3233646831 Update CHANGELOG.md (#12432)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 21:46:04 +00:00
Mazian
b122de1f99 Update .env output with useful information (#12401) 2026-02-28 22:45:44 +01:00
community-scripts-pr-app[bot]
c3d34736e8 Update CHANGELOG.md (#12431)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 21:40:51 +00:00
CanbiZ (MickLesk)
a77c95750c fix(gramps-web): install Gramps addons and fix GRAMPSHOME path (#12387) 2026-02-28 22:40:25 +01:00
community-scripts-pr-app[bot]
e10b7fba82 Update CHANGELOG.md (#12430)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 20:41:41 +00:00
Chris
dd37770b86 Fix sed command to fully replace line in postgresql.conf (#12429) 2026-02-28 21:41:17 +01:00
community-scripts-pr-app[bot]
c85809e512 Update CHANGELOG.md (#12428)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 19:30:59 +00:00
Chris
edb86a7150 [FIX] Immich: fix Openvino memory leak during OCR; improve HW-accelerated ML performance (#12426) 2026-02-28 20:30:37 +01:00
community-scripts-pr-app[bot]
d0e107d707 Update CHANGELOG.md (#12427)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 18:58:02 +00:00
Chris
187ccc4f1c BookLore: add additional JVM flags (#12421) 2026-02-28 19:57:38 +01:00
community-scripts-pr-app[bot]
20cecec4ad chore: update github-versions.json (#12425)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 18:06:23 +00:00
community-scripts-pr-app[bot]
8c28126479 Update CHANGELOG.md (#12424)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 17:13:32 +00:00
Josef Glatz
85213ea8b0 Fix default tag for ioBroker LXC install (#12423) 2026-02-28 18:13:07 +01:00
community-scripts-pr-app[bot]
1858aeee03 Update CHANGELOG.md (#12420)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 13:52:27 +00:00
community-scripts-pr-app[bot]
7ce4313ba4 Update CHANGELOG.md (#12419)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 13:52:16 +00:00
hraphael
fd498b0efb Update ombi.sh (#12412) 2026-02-28 14:52:01 +01:00
community-scripts-pr-app[bot]
cdbcf098d2 Update CHANGELOG.md (#12418)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 13:51:51 +00:00
CanbiZ (MickLesk)
fddc47064d core: read from /dev/tty in all interactive prompts | fix empty or cropped logs due build process (#12406) 2026-02-28 14:51:26 +01:00
community-scripts-pr-app[bot]
16ae89cd88 Update CHANGELOG.md (#12417)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 13:46:47 +00:00
community-scripts-pr-app[bot]
f2d88b7b4f Update CHANGELOG.md (#12416)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 13:46:27 +00:00
CanbiZ (MickLesk)
187c96f777 fix(dawarich): add missing build deps and handle seed failure (#12410) 2026-02-28 14:46:21 +01:00
CanbiZ (MickLesk)
f974ac4773 pangolin: increase hdd to 10G (#12409) 2026-02-28 14:45:59 +01:00
community-scripts-pr-app[bot]
869985dc60 chore: update github-versions.json (#12415)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 12:08:40 +00:00
CanbiZ (MickLesk)
1e391e22ce Add build-essential and python3 dependencies
Ensure build-essential and python3 are installed as prerequisites. ct/pangolin.sh now calls ensure_dependencies for these packages before setting up Node.js, and install/pangolin-install.sh adds them to the apt install list so build/runtime requirements are present during install.
2026-02-28 10:21:39 +01:00
community-scripts-pr-app[bot]
dae03cf80e Update .app files (#12407)
Co-authored-by: GitHub Actions <github-actions[bot]@users.noreply.github.com>
2026-02-28 09:04:07 +01:00
community-scripts-pr-app[bot]
1edcc106e3 Update CHANGELOG.md (#12408)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 08:03:37 +00:00
Chris
34eb094143 Delete Palmr (#12399) 2026-02-28 09:03:08 +01:00
community-scripts-pr-app[bot]
683c0d4e8b chore: update github-versions.json (#12405)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 06:10:08 +00:00
community-scripts-pr-app[bot]
9d8c544f83 Update CHANGELOG.md (#12403)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 00:18:45 +00:00
community-scripts-pr-app[bot]
5967d51769 chore: update github-versions.json (#12402)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-28 00:18:17 +00:00
community-scripts-pr-app[bot]
c37af920df chore: update github-versions.json (#12398)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 18:12:17 +00:00
community-scripts-pr-app[bot]
5f08bf2e98 Update CHANGELOG.md (#12393)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 13:40:05 +00:00
CanbiZ (MickLesk)
a2dc3f44d3 feat: graceful fallback for apt-get update failures (#12386)
Add apt_update_safe() function that warns instead of aborting when apt-get update fails (e.g. enterprise repo 401 Unauthorized). Shows a helpful hint about disabling the enterprise repo when no subscription is active. Replaces direct  apt-get update calls in build.func and install.func.
2026-02-27 14:39:39 +01:00
community-scripts-pr-app[bot]
9e9dfd6947 Update CHANGELOG.md (#12392)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 12:59:23 +00:00
CanbiZ (MickLesk)
774bbbc6d5 core: Improve error outputs across core functions (#12378)
* Improve error outputs across core functions

* Update tools.func
2026-02-27 13:59:02 +01:00
community-scripts-pr-app[bot]
c7a1d4bd13 Update .app files (#12389)
Co-authored-by: GitHub Actions <github-actions[bot]@users.noreply.github.com>
2026-02-27 13:55:59 +01:00
community-scripts-pr-app[bot]
a6d56700d9 Update CHANGELOG.md (#12391)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 12:55:33 +00:00
community-scripts-pr-app[bot]
ed22dc806d Update CHANGELOG.md (#12390)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 12:55:12 +00:00
community-scripts-pr-app[bot]
b129614679 Update date in json (#12388)
Co-authored-by: GitHub Actions <github-actions[bot]@users.noreply.github.com>
2026-02-27 12:55:05 +00:00
push-app-to-main[bot]
154374a2d1 Strapi (#12320)
* Add strapi (ct)

* Update strapi.sh

* Update date_created in strapi.json

* fix(strapi): use official upgrade tool instead of npm install

Replace 'npm install' with 'npx @strapi/upgrade minor --yes' which properly updates dependencies in package.json and runs codemods. See https://docs.strapi.io/cms/upgrade-tool

---------

Co-authored-by: push-app-to-main[bot] <203845782+push-app-to-main[bot]@users.noreply.github.com>
Co-authored-by: CanbiZ (MickLesk) <47820557+MickLesk@users.noreply.github.com>
2026-02-27 13:54:45 +01:00
community-scripts-pr-app[bot]
5ec5f980dc chore: update github-versions.json (#12385)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 12:11:22 +00:00
community-scripts-pr-app[bot]
b83c378667 Update CHANGELOG.md (#12384)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 08:45:20 +00:00
juronja
981e62d53d TrueNAS VM: filter out new nightlies with MASTER (#12355)
* filter out new nightlies with MASTER

* reversed the quotes
2026-02-27 09:44:57 +01:00
community-scripts-pr-app[bot]
03028a9a9b chore: update github-versions.json (#12382)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-02-27 06:18:51 +00:00
30 changed files with 711 additions and 476 deletions

View File

@@ -407,8 +407,55 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-02-28
### 🚀 Updated Scripts
- Update Reactive Resume install script with useful .env information for reverse proxy setup [@Mazianni](https://github.com/Mazianni) ([#12401](https://github.com/community-scripts/ProxmoxVE/pull/12401))
- #### 🐞 Bug Fixes
- gramps-web: install addons (FilterRules) for relationship diagram [@MickLesk](https://github.com/MickLesk) ([#12387](https://github.com/community-scripts/ProxmoxVE/pull/12387))
- [Fix] Immich: Change `sed` command to fully replace line in postgresql.conf [@vhsdream](https://github.com/vhsdream) ([#12429](https://github.com/community-scripts/ProxmoxVE/pull/12429))
- [FIX] Immich: fix Openvino memory leak during OCR; improve HW-accelerated ML performance [@vhsdream](https://github.com/vhsdream) ([#12426](https://github.com/community-scripts/ProxmoxVE/pull/12426))
- Fix default tag for ioBroker LXC install [@josefglatz](https://github.com/josefglatz) ([#12423](https://github.com/community-scripts/ProxmoxVE/pull/12423))
- Ombi: Add database.json [@hraphael](https://github.com/hraphael) ([#12412](https://github.com/community-scripts/ProxmoxVE/pull/12412))
- Dawarich: add missing build deps and handle seed failure [@MickLesk](https://github.com/MickLesk) ([#12410](https://github.com/community-scripts/ProxmoxVE/pull/12410))
- pangolin: increase hdd to 10G [@MickLesk](https://github.com/MickLesk) ([#12409](https://github.com/community-scripts/ProxmoxVE/pull/12409))
- #### ✨ New Features
- BookLore: add additional JVM flags [@vhsdream](https://github.com/vhsdream) ([#12421](https://github.com/community-scripts/ProxmoxVE/pull/12421))
### 🗑️ Deleted Scripts
- Delete Palmr [@vhsdream](https://github.com/vhsdream) ([#12399](https://github.com/community-scripts/ProxmoxVE/pull/12399))
### 💾 Core
- #### 🐞 Bug Fixes
- core: read from /dev/tty in all interactive prompts | fix empty or cropped logs due build process [@MickLesk](https://github.com/MickLesk) ([#12406](https://github.com/community-scripts/ProxmoxVE/pull/12406))
## 2026-02-27
### 🆕 New Scripts
- Strapi ([#12320](https://github.com/community-scripts/ProxmoxVE/pull/12320))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- TrueNAS VM: filter out new nightlies with MASTER [@juronja](https://github.com/juronja) ([#12355](https://github.com/community-scripts/ProxmoxVE/pull/12355))
### 💾 Core
- #### ✨ New Features
- core: graceful fallback for apt-get update failures [@MickLesk](https://github.com/MickLesk) ([#12386](https://github.com/community-scripts/ProxmoxVE/pull/12386))
- core: Improve error outputs across core functions [@MickLesk](https://github.com/MickLesk) ([#12378](https://github.com/community-scripts/ProxmoxVE/pull/12378))
## 2026-02-26
### 🆕 New Scripts

View File

@@ -91,7 +91,7 @@ function update_script() {
echo "SERVER_PORT=6060" >>/opt/booklore_storage/.env
fi
sed -i 's|ExecStart=/usr/bin/java -jar|ExecStart=/usr/bin/java -XX:+UseG1GC -XX:+UseStringDeduplication -XX:+UseCompactObjectHeaders -jar|' /etc/systemd/system/booklore.service
sed -i 's|ExecStart=.*|ExecStart=/usr/bin/java -XX:+UseG1GC -XX:+UseStringDeduplication -XX:+UseCompactObjectHeaders -XX:MaxRAMPercentage=75.0 -XX:+ExitOnOutOfMemoryError -jar /opt/booklore/dist/app.jar|' /etc/systemd/system/booklore.service
systemctl daemon-reload
msg_info "Starting Service"

View File

@@ -29,6 +29,8 @@ function update_script() {
exit
fi
ensure_dependencies libgeos++-dev libxml2-dev libxslt-dev libjemalloc-dev
if check_for_gh_release "dawarich" "Freika/dawarich"; then
msg_info "Stopping Services"
systemctl stop dawarich-web dawarich-worker

View File

@@ -51,11 +51,23 @@ function update_script() {
cd /opt/gramps-web-api
GRAMPS_API_CONFIG=/opt/gramps-web/config/config.cfg \
ALEMBIC_CONFIG=/opt/gramps-web-api/alembic.ini \
GRAMPSHOME=/opt/gramps-web/data/gramps \
GRAMPSHOME=/opt/gramps-web/data \
GRAMPS_DATABASE_PATH=/opt/gramps-web/data/gramps/grampsdb \
$STD /opt/gramps-web/venv/bin/python3 -m gramps_webapi user migrate
msg_ok "Applied Database Migration"
msg_info "Updating Gramps Addons"
GRAMPS_VERSION=$(/opt/gramps-web/venv/bin/python3 -c "import gramps.version; print('%s%s' % (gramps.version.VERSION_TUPLE[0], gramps.version.VERSION_TUPLE[1]))" 2>/dev/null || echo "60")
GRAMPS_PLUGINS_DIR="/opt/gramps-web/data/gramps/gramps${GRAMPS_VERSION}/plugins"
mkdir -p "$GRAMPS_PLUGINS_DIR"
$STD wget -q https://github.com/gramps-project/addons/archive/refs/heads/master.zip -O /tmp/gramps-addons.zip
for addon in FilterRules JSON; do
unzip -p /tmp/gramps-addons.zip "addons-master/gramps${GRAMPS_VERSION}/download/${addon}.addon.tgz" |
tar -xz -C "$GRAMPS_PLUGINS_DIR"
done
rm -f /tmp/gramps-addons.zip
msg_ok "Updated Gramps Addons"
msg_info "Starting Service"
systemctl start gramps-web
msg_ok "Started Service"

View File

@@ -1,6 +0,0 @@
____ __
/ __ \____ _/ /___ ___ _____
/ /_/ / __ `/ / __ `__ \/ ___/
/ ____/ /_/ / / / / / / / /
/_/ \__,_/_/_/ /_/ /_/_/

6
ct/headers/strapi Normal file
View File

@@ -0,0 +1,6 @@
_____ __ _
/ ___// /__________ _____ (_)
\__ \/ __/ ___/ __ `/ __ \/ /
___/ / /_/ / / /_/ / /_/ / /
/____/\__/_/ \__,_/ .___/_/
/_/

View File

@@ -72,9 +72,9 @@ EOF
SOURCE_DIR=${STAGING_DIR}/image-source
cd /tmp
if [[ -f ~/.intel_version ]]; then
curl -fsSLO https://raw.githubusercontent.com/immich-app/base-images/refs/heads/main/server/Dockerfile
curl -fsSLO https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/machine-learning/Dockerfile
readarray -t INTEL_URLS < <(
sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $2}'
sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $3}'
sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}'
)
INTEL_RELEASE="$(grep "intel-opencl-icd_" ./Dockerfile | awk -F '_' '{print $2}')"
@@ -214,9 +214,9 @@ EOF
export VIRTUAL_ENV="${ML_DIR}"/ml-venv
if [[ -f ~/.openvino ]]; then
msg_info "Updating HW-accelerated machine-learning"
$STD uv add --no-sync --optional openvino onnxruntime-openvino==1.20.0 --active -n -p python3.12 --managed-python
$STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.12 --managed-python
patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.12/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-312-x86_64-linux-gnu.so"
$STD uv add --no-sync --optional openvino onnxruntime-openvino==1.24.1 --active -n -p python3.13 --managed-python
$STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.13 --managed-python
patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.13/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-313-x86_64-linux-gnu.so"
msg_ok "Updated HW-accelerated machine-learning"
else
msg_info "Updating machine-learning"

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://www.iobroker.net/#en/intro | Github: https://github.com/ioBroker/ioBroker.js-controller
APP="ioBroker"
var_tags="${var_tags:-automtation}"
var_tags="${var_tags:-automation}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"

View File

@@ -36,6 +36,7 @@ function update_script() {
[[ -f /opt/ombi/Ombi.db ]] && mv /opt/ombi/Ombi.db /opt
[[ -f /opt/ombi/OmbiExternal.db ]] && mv /opt/ombi/OmbiExternal.db /opt
[[ -f /opt/ombi/OmbiSettings.db ]] && mv /opt/ombi/OmbiSettings.db /opt
[[ -f /opt/ombi/database.json ]] && mv /opt/ombi/database.json /opt
msg_ok "Backup created"
rm -rf /opt/ombi
@@ -43,6 +44,7 @@ function update_script() {
[[ -f /opt/Ombi.db ]] && mv /opt/Ombi.db /opt/ombi
[[ -f /opt/OmbiExternal.db ]] && mv /opt/OmbiExternal.db /opt/ombi
[[ -f /opt/OmbiSettings.db ]] && mv /opt/OmbiSettings.db /opt/ombi
[[ -f /opt/database.json ]] && mv /opt/database.json /opt/ombi
msg_info "Starting Service"
systemctl start ombi

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/kyantech/Palmr
APP="Palmr"
var_tags="${var_tags:-files}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-6144}"
var_disk="${var_disk:-6}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/palmr_data ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "palmr" "kyantech/Palmr"; then
msg_info "Stopping Services"
systemctl stop palmr-frontend palmr-backend
msg_ok "Stopped Services"
cp /opt/palmr/apps/server/.env /opt/palmr.env
rm -rf /opt/palmr
fetch_and_deploy_gh_release "Palmr" "kyantech/Palmr" "tarball" "latest" "/opt/palmr"
PNPM="$(jq -r '.packageManager' /opt/palmr/package.json)"
NODE_VERSION="24" NODE_MODULE="$PNPM" setup_nodejs
msg_info "Updating ${APP}"
cd /opt/palmr/apps/server
mv /opt/palmr.env /opt/palmr/apps/server/.env
$STD pnpm install
$STD npx prisma generate
$STD npx prisma migrate deploy
$STD npx prisma db push
$STD pnpm build
cd /opt/palmr/apps/web
export NODE_ENV=production
export NEXT_TELEMETRY_DISABLED=1
mv ./.env.example ./.env
$STD pnpm install
$STD pnpm build
chown -R palmr:palmr /opt/palmr_data /opt/palmr
msg_ok "Updated ${APP}"
msg_info "Starting Services"
systemctl start palmr-backend palmr-frontend
msg_ok "Started Services"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

View File

@@ -9,7 +9,7 @@ APP="Pangolin"
var_tags="${var_tags:-proxy}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-5}"
var_disk="${var_disk:-10}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
@@ -29,6 +29,8 @@ function update_script() {
exit
fi
ensure_dependencies build-essential python3
NODE_VERSION="24" setup_nodejs
if check_for_gh_release "pangolin" "fosrl/pangolin"; then

61
ct/strapi.sh Executable file
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: pespinel
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://strapi.io/
APP="Strapi"
var_tags="${var_tags:-cms}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/systemd/system/strapi.service ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
NODE_VERSION="24" setup_nodejs
msg_info "Stopping Strapi"
systemctl stop strapi
msg_ok "Stopped Strapi"
msg_info "Updating Strapi"
cd /opt/strapi
$STD npx @strapi/upgrade minor --yes
msg_ok "Updated Strapi"
msg_info "Building Strapi"
export NODE_OPTIONS="--max-old-space-size=3072"
$STD npm run build
msg_ok "Built Strapi"
msg_info "Starting Strapi"
systemctl start strapi
msg_ok "Started Strapi"
msg_ok "Updated successfully!"
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:1337${CL}"

View File

@@ -1,5 +1,5 @@
{
"generated": "2026-02-27T00:22:57Z",
"generated": "2026-02-28T18:06:16Z",
"versions": [
{
"slug": "2fauth",
@@ -18,9 +18,9 @@
{
"slug": "adguardhome-sync",
"repo": "bakito/adguardhome-sync",
"version": "v0.8.2",
"version": "v0.9.0",
"pinned": false,
"date": "2025-10-24T17:13:47Z"
"date": "2026-02-27T18:37:37Z"
},
{
"slug": "adventurelog",
@@ -116,9 +116,9 @@
{
"slug": "bentopdf",
"repo": "alam00000/bentopdf",
"version": "v2.3.1",
"version": "v2.3.3",
"pinned": false,
"date": "2026-02-21T09:04:27Z"
"date": "2026-02-27T08:40:05Z"
},
{
"slug": "beszel",
@@ -144,23 +144,23 @@
{
"slug": "blocky",
"repo": "0xERR0R/blocky",
"version": "v0.28.2",
"version": "v0.29.0",
"pinned": false,
"date": "2025-11-18T05:51:46Z"
"date": "2026-02-27T15:48:56Z"
},
{
"slug": "booklore",
"repo": "booklore-app/BookLore",
"version": "v2.0.3",
"version": "v2.0.4",
"pinned": false,
"date": "2026-02-26T19:10:59Z"
"date": "2026-02-28T01:54:25Z"
},
{
"slug": "bookstack",
"repo": "BookStackApp/BookStack",
"version": "v25.12.7",
"version": "v25.12.8",
"pinned": false,
"date": "2026-02-19T23:36:55Z"
"date": "2026-02-27T10:33:14Z"
},
{
"slug": "byparr",
@@ -200,9 +200,9 @@
{
"slug": "cleanuparr",
"repo": "Cleanuparr/Cleanuparr",
"version": "v2.7.5",
"version": "v2.7.6",
"pinned": false,
"date": "2026-02-24T17:11:50Z"
"date": "2026-02-27T19:32:02Z"
},
{
"slug": "cloudreve",
@@ -242,9 +242,9 @@
{
"slug": "cosmos",
"repo": "azukaar/Cosmos-Server",
"version": "v0.21.3",
"version": "v0.21.5",
"pinned": false,
"date": "2026-02-26T20:04:35Z"
"date": "2026-02-27T10:07:11Z"
},
{
"slug": "cronicle",
@@ -277,9 +277,9 @@
{
"slug": "dawarich",
"repo": "Freika/dawarich",
"version": "1.3.0",
"version": "1.3.1",
"pinned": false,
"date": "2026-02-25T19:30:25Z"
"date": "2026-02-27T19:47:40Z"
},
{
"slug": "discopanel",
@@ -361,9 +361,9 @@
{
"slug": "endurain",
"repo": "endurain-project/endurain",
"version": "v0.17.5",
"version": "v0.17.6",
"pinned": false,
"date": "2026-02-24T14:51:03Z"
"date": "2026-02-27T23:08:50Z"
},
{
"slug": "ersatztv",
@@ -382,9 +382,9 @@
{
"slug": "firefly",
"repo": "firefly-iii/firefly-iii",
"version": "v6.5.0",
"version": "v6.5.1",
"pinned": false,
"date": "2026-02-23T19:19:00Z"
"date": "2026-02-27T20:55:55Z"
},
{
"slug": "fladder",
@@ -438,9 +438,9 @@
{
"slug": "ghostfolio",
"repo": "ghostfolio/ghostfolio",
"version": "2.243.0",
"version": "2.244.0",
"pinned": false,
"date": "2026-02-23T19:31:36Z"
"date": "2026-02-28T09:51:45Z"
},
{
"slug": "gitea",
@@ -452,9 +452,9 @@
{
"slug": "gitea-mirror",
"repo": "RayLabsHQ/gitea-mirror",
"version": "v3.9.5",
"version": "v3.9.6",
"pinned": false,
"date": "2026-02-26T05:32:12Z"
"date": "2026-02-27T07:15:42Z"
},
{
"slug": "glance",
@@ -494,9 +494,9 @@
{
"slug": "grist",
"repo": "gristlabs/grist-core",
"version": "v1.7.10",
"version": "v1.7.11",
"pinned": false,
"date": "2026-01-12T20:50:50Z"
"date": "2026-02-27T17:13:50Z"
},
{
"slug": "grocy",
@@ -550,9 +550,9 @@
{
"slug": "homarr",
"repo": "homarr-labs/homarr",
"version": "v1.53.2",
"version": "v1.54.0",
"pinned": false,
"date": "2026-02-20T19:41:55Z"
"date": "2026-02-27T19:38:50Z"
},
{
"slug": "homebox",
@@ -613,9 +613,9 @@
{
"slug": "jackett",
"repo": "Jackett/Jackett",
"version": "v0.24.1218",
"version": "v0.24.1226",
"pinned": false,
"date": "2026-02-26T05:55:11Z"
"date": "2026-02-28T05:58:51Z"
},
{
"slug": "jellystat",
@@ -669,9 +669,9 @@
{
"slug": "kima-hub",
"repo": "Chevron7Locked/kima-hub",
"version": "v1.5.7",
"version": "v1.5.10",
"pinned": false,
"date": "2026-02-23T23:58:59Z"
"date": "2026-02-27T19:25:56Z"
},
{
"slug": "kimai",
@@ -718,9 +718,9 @@
{
"slug": "kubo",
"repo": "ipfs/kubo",
"version": "v0.40.0",
"version": "v0.40.1",
"pinned": false,
"date": "2026-02-25T23:16:17Z"
"date": "2026-02-27T17:58:22Z"
},
{
"slug": "kutt",
@@ -872,9 +872,9 @@
{
"slug": "metube",
"repo": "alexta69/metube",
"version": "2026.02.22",
"version": "2026.02.27",
"pinned": false,
"date": "2026-02-22T00:58:45Z"
"date": "2026-02-27T11:47:02Z"
},
{
"slug": "miniflux",
@@ -942,9 +942,9 @@
{
"slug": "nightscout",
"repo": "nightscout/cgm-remote-monitor",
"version": "15.0.3",
"version": "15.0.4",
"pinned": false,
"date": "2025-05-08T22:12:34Z"
"date": "2026-02-28T17:07:02Z"
},
{
"slug": "nocodb",
@@ -956,9 +956,9 @@
{
"slug": "nodebb",
"repo": "NodeBB/NodeBB",
"version": "v4.8.1",
"version": "v4.9.0",
"pinned": false,
"date": "2026-01-28T14:19:11Z"
"date": "2026-02-27T19:20:51Z"
},
{
"slug": "nodecast-tv",
@@ -1037,19 +1037,12 @@
"pinned": false,
"date": "2025-02-24T19:47:06Z"
},
{
"slug": "palmr",
"repo": "kyantech/Palmr",
"version": "v3.3.2-beta",
"pinned": false,
"date": "2025-12-10T05:42:43Z"
},
{
"slug": "pangolin",
"repo": "fosrl/pangolin",
"version": "1.15.4",
"version": "1.16.1",
"pinned": false,
"date": "2026-02-13T23:01:29Z"
"date": "2026-02-27T21:18:53Z"
},
{
"slug": "paperless-ai",
@@ -1068,9 +1061,9 @@
{
"slug": "paperless-ngx",
"repo": "paperless-ngx/paperless-ngx",
"version": "v2.20.8",
"version": "v2.20.9",
"pinned": false,
"date": "2026-02-22T01:40:54Z"
"date": "2026-02-28T10:17:35Z"
},
{
"slug": "patchmon",
@@ -1222,9 +1215,9 @@
{
"slug": "pulse",
"repo": "rcourtman/Pulse",
"version": "v5.1.14",
"version": "v5.1.15",
"pinned": false,
"date": "2026-02-25T00:11:58Z"
"date": "2026-02-27T15:17:24Z"
},
{
"slug": "pve-scripts-local",
@@ -1348,9 +1341,9 @@
{
"slug": "scanopy",
"repo": "scanopy/scanopy",
"version": "v0.14.8",
"version": "v0.14.9",
"pinned": false,
"date": "2026-02-24T16:45:30Z"
"date": "2026-02-28T02:59:32Z"
},
{
"slug": "scraparr",
@@ -1376,16 +1369,16 @@
{
"slug": "seerr",
"repo": "seerr-team/seerr",
"version": "v3.0.1",
"version": "v3.1.0",
"pinned": false,
"date": "2026-02-14T19:30:24Z"
"date": "2026-02-27T17:25:29Z"
},
{
"slug": "semaphore",
"repo": "semaphoreui/semaphore",
"version": "v2.17.14",
"version": "v2.17.15",
"pinned": false,
"date": "2026-02-24T14:27:03Z"
"date": "2026-02-28T09:04:40Z"
},
{
"slug": "shelfmark",
@@ -1488,9 +1481,9 @@
{
"slug": "sure",
"repo": "we-promise/sure",
"version": "chart-v0.6.8-alpha.13",
"version": "v0.6.8",
"pinned": false,
"date": "2026-02-20T11:15:15Z"
"date": "2026-02-28T12:55:36Z"
},
{
"slug": "tandoor",
@@ -1558,9 +1551,9 @@
{
"slug": "traccar",
"repo": "traccar/traccar",
"version": "v6.12.1",
"version": "v6.12.2",
"pinned": false,
"date": "2026-02-22T18:47:37Z"
"date": "2026-02-27T15:08:36Z"
},
{
"slug": "tracearr",
@@ -1670,9 +1663,9 @@
{
"slug": "vikunja",
"repo": "go-vikunja/vikunja",
"version": "v2.0.0",
"version": "v2.1.0",
"pinned": false,
"date": "2026-02-25T13:58:47Z"
"date": "2026-02-27T14:26:53Z"
},
{
"slug": "wallabag",
@@ -1817,9 +1810,9 @@
{
"slug": "zoraxy",
"repo": "tobychui/zoraxy",
"version": "v3.3.2-rc1",
"version": "v3.3.2-rc2",
"pinned": false,
"date": "2026-02-15T02:16:17Z"
"date": "2026-02-27T03:31:25Z"
},
{
"slug": "zwave-js-ui",

View File

@@ -1,45 +0,0 @@
{
"name": "Palmr",
"slug": "palmr",
"categories": [
11
],
"date_created": "2025-08-08",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3000,
"disable": true,
"documentation": "https://palmr.kyantech.com.br/docs/3.1-beta",
"config_path": "/opt/palmr/apps/server/.env, /opt/palmr/apps/web/.env",
"website": "https://palmr.kyantech.com.br/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/palmr.webp",
"description": "Palmr is a fast and secure platform for sharing files, built with performance and privacy in mind.",
"install_methods": [
{
"type": "default",
"script": "ct/palmr.sh",
"resources": {
"cpu": 4,
"ram": 6144,
"hdd": 6,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "To use a bind mount for storage, create symlinks to your mount for both `uploads` and `temp-uploads` in `/opt/palmr_data`, and uncomment `CUSTOM_PATH` to add the path to your bind mount",
"type": "info"
},
{
"text": "To use Palmr with a reverse proxy, uncomment `SECURE_SITE` in `/opt/palmr/apps/server/.env`",
"type": "info"
}
]
}

View File

@@ -21,7 +21,7 @@
"resources": {
"cpu": 2,
"ram": 4096,
"hdd": 5,
"hdd": 10,
"os": "Debian",
"version": "13"
}

View File

@@ -0,0 +1,48 @@
{
"name": "Strapi",
"slug": "strapi",
"categories": [
12
],
"date_created": "2026-02-27",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 1337,
"documentation": "https://docs.strapi.io/",
"website": "https://strapi.io/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/strapi.webp",
"config_path": "/opt/strapi/.env",
"description": "Strapi is a leading open-source headless CMS that enables developers to build powerful APIs quickly. It features a flexible content structure with customizable content types, supporting both REST and GraphQL APIs. The intuitive admin panel allows non-technical users to manage content easily, while developers can extend functionality through plugins. Built on Node.js, Strapi offers role-based access control, media library management, and internationalization support out of the box.",
"install_methods": [
{
"type": "default",
"script": "ct/strapi.sh",
"resources": {
"cpu": 2,
"ram": 4096,
"hdd": 8,
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "First-time setup requires creating an admin account at http://IP:1337/admin",
"type": "info"
},
{
"text": "Default installation uses SQLite. For production use, consider configuring PostgreSQL or MySQL.",
"type": "info"
},
{
"text": "Building the admin panel requires 4GB RAM. Container may take 10-15 minutes to fully initialize.",
"type": "warning"
}
]
}

View File

@@ -74,7 +74,7 @@ After=network.target mariadb.service
Type=simple
User=root
WorkingDirectory=/opt/booklore/dist
ExecStart=/usr/bin/java -XX:+UseG1GC -XX:+UseStringDeduplication -XX:+UseCompactObjectHeaders -jar /opt/booklore/dist/app.jar
ExecStart=/usr/bin/java -XX:+UseG1GC -XX:+UseStringDeduplication -XX:+UseCompactObjectHeaders -XX:MaxRAMPercentage=75.0 -XX:+ExitOnOutOfMemoryError -jar /opt/booklore/dist/app.jar
EnvironmentFile=/opt/booklore_storage/.env
SuccessExitStatus=143
TimeoutStopSec=10

View File

@@ -16,19 +16,23 @@ update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
git \
libpq-dev \
libgeos-dev \
libyaml-dev \
libffi-dev \
libssl-dev \
libjemalloc2 \
imagemagick \
libmagickwand-dev \
libvips-dev \
cmake \
redis-server \
nginx
git \
imagemagick \
libffi-dev \
libgeos-dev \
libgeos++-dev \
libjemalloc2 \
libjemalloc-dev \
libmagickwand-dev \
libpq-dev \
libssl-dev \
libvips-dev \
libxml2-dev \
libxslt-dev \
libyaml-dev \
nginx \
redis-server
msg_ok "Installed Dependencies"
PG_VERSION="17" PG_MODULES="postgis-3" setup_postgresql
@@ -82,7 +86,9 @@ elif [[ -f /opt/dawarich/app/package.json ]]; then
$STD npm install
fi
$STD bundle exec rake assets:precompile
$STD bundle exec rails db:prepare
$STD bundle exec rails db:create
$STD bundle exec rails db:schema:load
$STD bundle exec rails db:seed || msg_warn "Database seed failed (upstream rgeo-geojson issue), app will still work"
$STD bundle exec rake data:migrate
msg_ok "Installed Dawarich"

View File

@@ -76,6 +76,20 @@ source /opt/gramps-web/venv/bin/activate
$STD uv pip install --no-cache-dir --upgrade pip setuptools wheel
$STD uv pip install --no-cache-dir gunicorn
$STD uv pip install --no-cache-dir /opt/gramps-web-api
GRAMPS_VERSION=$(/opt/gramps-web/venv/bin/python3 -c "import gramps.version; print('%s%s' % (gramps.version.VERSION_TUPLE[0], gramps.version.VERSION_TUPLE[1]))" 2>/dev/null || echo "60")
GRAMPS_PLUGINS_DIR="/opt/gramps-web/data/gramps/gramps${GRAMPS_VERSION}/plugins"
mkdir -p "$GRAMPS_PLUGINS_DIR"
msg_info "Installing Gramps Addons (gramps${GRAMPS_VERSION})"
$STD wget -q https://github.com/gramps-project/addons/archive/refs/heads/master.zip -O /tmp/gramps-addons.zip
for addon in FilterRules JSON; do
unzip -p /tmp/gramps-addons.zip "addons-master/gramps${GRAMPS_VERSION}/download/${addon}.addon.tgz" | \
tar -xz -C "$GRAMPS_PLUGINS_DIR"
done
rm -f /tmp/gramps-addons.zip
msg_ok "Installed Gramps Addons"
cd /opt/gramps-web/frontend
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
$STD corepack enable
@@ -84,7 +98,7 @@ $STD npm run build
cd /opt/gramps-web-api
GRAMPS_API_CONFIG=/opt/gramps-web/config/config.cfg \
ALEMBIC_CONFIG=/opt/gramps-web-api/alembic.ini \
GRAMPSHOME=/opt/gramps-web/data/gramps \
GRAMPSHOME=/opt/gramps-web/data \
GRAMPS_DATABASE_PATH=/opt/gramps-web/data/gramps/grampsdb \
$STD /opt/gramps-web/venv/bin/python3 -m gramps_webapi user migrate
msg_ok "Set up Gramps Web"
@@ -100,7 +114,7 @@ Type=simple
User=root
WorkingDirectory=/opt/gramps-web-api
Environment=GRAMPS_API_CONFIG=/opt/gramps-web/config/config.cfg
Environment=GRAMPSHOME=/opt/gramps-web/data/gramps
Environment=GRAMPSHOME=/opt/gramps-web/data
Environment=GRAMPS_DATABASE_PATH=/opt/gramps-web/data/gramps/grampsdb
Environment=PATH=/opt/gramps-web/venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/opt/gramps-web/venv/bin/gunicorn -w 2 -b 0.0.0.0:5000 gramps_webapi.wsgi:app --timeout 120 --limit-request-line 8190

View File

@@ -32,9 +32,9 @@ if [ -d /dev/dri ]; then
$STD apt install -y --no-install-recommends patchelf
tmp_dir=$(mktemp -d)
$STD pushd "$tmp_dir"
curl -fsSLO https://raw.githubusercontent.com/immich-app/base-images/refs/heads/main/server/Dockerfile
curl -fsSLO https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/machine-learning/Dockerfile
readarray -t INTEL_URLS < <(
sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $2}'
sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $3}'
sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}'
)
for url in "${INTEL_URLS[@]}"; do
@@ -150,7 +150,7 @@ PG_VERSION="16" PG_MODULES="pgvector" setup_postgresql
VCHORD_RELEASE="0.5.3"
fetch_and_deploy_gh_release "VectorChord" "tensorchord/VectorChord" "binary" "${VCHORD_RELEASE}" "/tmp" "postgresql-16-vchord_*_amd64.deb"
sed -i -e "/^#shared_preload/s/^#//;/^shared_preload/s/''/'vchord.so'/" /etc/postgresql/16/main/postgresql.conf
sed -i "s/^#shared_preload.*/shared_preload_libraries = 'vchord.so'/" /etc/postgresql/16/main/postgresql.conf
systemctl restart postgresql.service
PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_GRANT_SUPERUSER="true" PG_DB_SKIP_ALTER_ROLE="true" setup_postgresql_db
@@ -342,9 +342,9 @@ mkdir -p "$ML_DIR" && chown -R immich:immich "$INSTALL_DIR"
export VIRTUAL_ENV="${ML_DIR}/ml-venv"
if [[ -f ~/.openvino ]]; then
msg_info "Installing HW-accelerated machine-learning"
$STD uv add --no-sync --optional openvino onnxruntime-openvino==1.20.0 --active -n -p python3.12 --managed-python
$STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.12 --managed-python
patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.12/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-312-x86_64-linux-gnu.so"
$STD uv add --no-sync --optional openvino onnxruntime-openvino==1.24.1 --active -n -p python3.13 --managed-python
$STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.13 --managed-python
patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.13/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-313-x86_64-linux-gnu.so"
msg_ok "Installed HW-accelerated machine-learning"
else
msg_info "Installing machine-learning"

View File

@@ -1,91 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/kyantech/Palmr
# Installs Palmr (self-hosted file sharing) inside an LXC container.
# Helper functions used below (color, msg_info, $STD, setup_nodejs, ...) are
# provided by the host via the FUNCTIONS_FILE_PATH environment variable.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
# Deploy the latest Palmr source tarball from GitHub into /opt/palmr
fetch_and_deploy_gh_release "Palmr" "kyantech/Palmr" "tarball" "latest" "/opt/palmr"
# Use the exact pnpm version pinned in the project's "packageManager" field
PNPM="$(jq -r '.packageManager' /opt/palmr/package.json)"
NODE_VERSION="24" NODE_MODULE="$PNPM" setup_nodejs
msg_info "Configuring palmr backend"
# Persistent data directory (database + key) kept outside the app tree
PALMR_DIR="/opt/palmr_data"
mkdir -p "$PALMR_DIR"
PALMR_DB="${PALMR_DIR}/palmr.db"
PALMR_KEY="$(openssl rand -hex 32)"
cd /opt/palmr/apps/server
# Build .env from the example file: disable the *_ENCRYPTION toggle,
# uncomment the "# ENC" line, inject the generated key, point the database
# URL at $PALMR_DB, and append commented reverse-proxy / symlink hints
# after the line ending in db".
sed -e 's/_ENCRYPTION=true/_ENCRYPTION=false/' \
-e '/^# ENC/s/# //' \
-e "s/ENCRYPTION_KEY=.*$/ENCRYPTION_KEY=$PALMR_KEY/" \
-e "s|file:.*$|file:$PALMR_DB\"|" \
-e "\|db\"$|a\\# Uncomment below when using a reverse proxy\\
# SECURE_SITE=true\\
# Uncomment and add your path if using symlinks for data storage\\
# CUSTOM_PATH=<path-to-your-bind-mount>" \
.env.example >./.env
$STD pnpm install
# Generate the Prisma client, apply migrations, sync the schema, then seed
$STD npx prisma generate
$STD npx prisma migrate deploy
$STD npx prisma db push
$STD pnpm db:seed
$STD pnpm build
msg_ok "Configured palmr backend"
msg_info "Configuring palmr frontend"
cd /opt/palmr/apps/web
mv ./.env.example ./.env
export NODE_ENV=production
# Disable Next.js telemetry for the production build — TODO confirm the
# frontend is Next.js; inferred from the NEXT_* variable name
export NEXT_TELEMETRY_DISABLED=1
$STD pnpm install
$STD pnpm build
msg_ok "Configured palmr frontend"
msg_info "Creating service"
# Dedicated no-login system user; owns both the data dir and the app tree
useradd -d "$PALMR_DIR" -M -s /usr/sbin/nologin -U palmr
chown -R palmr:palmr "$PALMR_DIR" /opt/palmr
# systemd unit for the Node backend (runs the built dist/server.js)
cat <<EOF >/etc/systemd/system/palmr-backend.service
[Unit]
Description=palmr Backend Service
After=network.target
[Service]
Type=simple
User=palmr
Group=palmr
WorkingDirectory=/opt/palmr_data
ExecStart=/usr/bin/node /opt/palmr/apps/server/dist/server.js
[Install]
WantedBy=multi-user.target
EOF
# systemd unit for the frontend; ordered after the backend unit
cat <<EOF >/etc/systemd/system/palmr-frontend.service
[Unit]
Description=palmr Frontend Service
After=network.target palmr-backend.service
[Service]
Type=simple
User=palmr
Group=palmr
WorkingDirectory=/opt/palmr/apps/web
ExecStart=/usr/bin/pnpm start
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now palmr-backend palmr-frontend
msg_ok "Created service"
motd_ssh
customize
cleanup_lxc

View File

@@ -15,6 +15,8 @@ update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
python3 \
sqlite3 \
iptables
msg_ok "Installed Dependencies"

View File

@@ -69,6 +69,10 @@ cat <<EOF >/opt/Reactive-Resume/.env
NODE_ENV=production
PORT=3000
# for use behind a reverse proxy, use your FQDN for PUBLIC_URL and STORAGE_URL
# To avoid issues when behind a reverse proxy with downloading PDFs, ensure that the
# storage path is accessible via a subdomain (i.e storage.yourapp.xyz) or you set your
# reverse proxy to properly rewrite the subpath (/rxresume) to point to the service
# running on port 9000 (minio).
PUBLIC_URL=http://${LOCAL_IP}:3000
STORAGE_URL=http://${LOCAL_IP}:9000/rxresume
DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}?schema=public

69
install/strapi-install.sh Executable file
View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: pespinel
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://strapi.io/
# Installs Strapi CMS inside an LXC container via the quickstart scaffold.
# Helper functions used below (msg_info, $STD, setup_nodejs, ...) are
# provided by the host via the FUNCTIONS_FILE_PATH environment variable.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
# build-essential/python3 support native node module builds; libvips42 is
# presumably for Strapi's image processing (sharp) — verify against upstream
$STD apt install -y \
build-essential \
python3 \
python3-setuptools \
libvips42
msg_ok "Installed Dependencies"
NODE_VERSION="24" setup_nodejs
msg_info "Installing Strapi (Patience)"
mkdir -p /opt/strapi
cd /opt/strapi
# Scaffold in place: --quickstart uses defaults, --no-run skips starting
# the dev server, --skip-cloud skips the Strapi Cloud login prompt
$STD npx --yes create-strapi-app@latest . --quickstart --no-run --skip-cloud
msg_ok "Installed Strapi"
msg_info "Building Strapi"
cd /opt/strapi
# The admin-panel build is memory hungry; raise the V8 heap cap to 3072 MB
export NODE_OPTIONS="--max-old-space-size=3072"
$STD npm run build
msg_ok "Built Strapi"
msg_info "Creating Service"
# Generate the secrets Strapi reads from its .env at startup
cat <<EOF >/opt/strapi/.env
HOST=0.0.0.0
PORT=1337
APP_KEYS=$(openssl rand -base64 32)
API_TOKEN_SALT=$(openssl rand -base64 32)
ADMIN_JWT_SECRET=$(openssl rand -base64 32)
TRANSFER_TOKEN_SALT=$(openssl rand -base64 32)
JWT_SECRET=$(openssl rand -base64 32)
EOF
# systemd unit: run "npm run start" in production mode with the generated env
cat <<EOF >/etc/systemd/system/strapi.service
[Unit]
Description=Strapi CMS
After=network.target
[Service]
Type=simple
WorkingDirectory=/opt/strapi
EnvironmentFile=/opt/strapi/.env
ExecStart=/usr/bin/npm run start
Restart=on-failure
Environment=NODE_ENV=production
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now strapi
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -118,7 +118,7 @@ maxkeys_check() {
# Exit if kernel parameters are unavailable
if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
msg_error "Unable to read kernel key parameters. Ensure proper permissions."
exit 1
fi
@@ -135,19 +135,19 @@ maxkeys_check() {
# Check if key or byte usage is near limits
failure=0
if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
msg_warn "Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys})"
echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
failure=1
fi
if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
msg_warn "Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes})"
echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
failure=1
fi
# Provide next steps if issues are detected
if [[ "$failure" -eq 1 ]]; then
echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
msg_error "Kernel key limits exceeded - see suggestions above"
exit 1
fi
@@ -2034,6 +2034,7 @@ advanced_settings() {
((STEP++))
else
whiptail --msgbox "Default bridge 'vmbr0' not found!\n\nPlease configure a network bridge in Proxmox first." 10 58
msg_error "Default bridge 'vmbr0' not found"
exit 1
fi
else
@@ -3049,7 +3050,7 @@ install_script() {
CHOICE=""
;;
*)
echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
msg_error "Invalid option: $CHOICE"
exit 1
;;
esac
@@ -3128,12 +3129,12 @@ check_container_resources() {
current_cpu=$(nproc)
if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
msg_warn "Under-provisioned: Required ${var_cpu} CPU/${var_ram}MB RAM, Current ${current_cpu} CPU/${current_ram}MB RAM"
echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? <yes/No> "
read -r prompt
read -r prompt </dev/tty
if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
msg_error "Aborted: under-provisioned LXC (${current_cpu} CPU/${current_ram}MB RAM < ${var_cpu} CPU/${var_ram}MB RAM)"
exit 1
fi
else
@@ -3152,11 +3153,11 @@ check_container_storage() {
local used_size=$(df /boot --output=used | tail -n 1)
usage=$((100 * used_size / total_size))
if ((usage > 80)); then
echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
msg_warn "Storage is dangerously low (${usage}% used on /boot)"
echo -ne "Continue anyway? <y/N> "
read -r prompt
read -r prompt </dev/tty
if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
msg_error "Aborted: storage too low (${usage}% used)"
exit 1
fi
fi
@@ -3546,10 +3547,16 @@ build_container() {
# Build PCT_OPTIONS as string for export
TEMP_DIR=$(mktemp -d)
pushd "$TEMP_DIR" >/dev/null
local _func_url
if [ "$var_os" == "alpine" ]; then
export FUNCTIONS_FILE_PATH="$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/alpine-install.func)"
_func_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/alpine-install.func"
else
export FUNCTIONS_FILE_PATH="$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/install.func)"
_func_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/install.func"
fi
export FUNCTIONS_FILE_PATH="$(curl -fsSL "$_func_url")"
if [[ -z "$FUNCTIONS_FILE_PATH" || ${#FUNCTIONS_FILE_PATH} -lt 100 ]]; then
msg_error "Failed to download install functions from: $_func_url"
exit 1
fi
# Core exports for install.func
@@ -3700,10 +3707,18 @@ $PCT_OPTIONS_STRING"
NVIDIA_DEVICES=()
# Store PCI info to avoid multiple calls
local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")
# grep returns exit 1 when no match — use || true to prevent ERR trap
local pci_vga_info
pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D" || true)
# No GPU-related PCI devices at all? Skip silently.
if [[ -z "$pci_vga_info" ]]; then
msg_debug "No VGA/Display/3D PCI devices found"
return 0
fi
# Check for Intel GPU - look for Intel vendor ID [8086]
if echo "$pci_vga_info" | grep -q "\[8086:"; then
if grep -q "\[8086:" <<<"$pci_vga_info"; then
msg_custom "🎮" "${BL}" "Detected Intel GPU"
if [[ -d /dev/dri ]]; then
for d in /dev/dri/renderD* /dev/dri/card*; do
@@ -3713,7 +3728,7 @@ $PCT_OPTIONS_STRING"
fi
# Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
if grep -qE "\[1002:|\[1022:" <<<"$pci_vga_info"; then
msg_custom "🎮" "${RD}" "Detected AMD GPU"
if [[ -d /dev/dri ]]; then
# Only add if not already claimed by Intel
@@ -3726,7 +3741,7 @@ $PCT_OPTIONS_STRING"
fi
# Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
if echo "$pci_vga_info" | grep -q "\[10de:"; then
if grep -q "\[10de:" <<<"$pci_vga_info"; then
msg_custom "🎮" "${GN}" "Detected NVIDIA GPU"
# Simple passthrough - just bind /dev/nvidia* devices if they exist
@@ -3827,7 +3842,7 @@ EOF
for gpu in "${available_gpus[@]}"; do
echo " - $gpu"
done
read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu </dev/tty
selected_gpu="${selected_gpu^^}"
# Validate selection
@@ -3920,7 +3935,9 @@ EOF
fi
sleep 1
if [ "$i" -eq 10 ]; then
msg_error "LXC Container did not reach running state"
local ct_status
ct_status=$(pct status "$CTID" 2>/dev/null || echo "unknown")
msg_error "LXC Container did not reach running state (status: ${ct_status})"
exit 1
fi
done
@@ -3944,7 +3961,7 @@ EOF
if [ -z "$ip_in_lxc" ]; then
msg_error "No IP assigned to CT $CTID after 20s"
echo -e "${YW}Troubleshooting:${CL}"
msg_custom "🔧" "${YW}" "Troubleshooting:"
echo " • Verify bridge ${BRG} exists and has connectivity"
echo " • Check if DHCP server is reachable (if using DHCP)"
echo " • Verify static IP configuration (if using static IP)"
@@ -3966,8 +3983,7 @@ EOF
done
if [ "$ping_success" = false ]; then
msg_warn "Network configured (IP: $ip_in_lxc) but connectivity test failed"
echo -e "${YW}Container may have limited internet access. Installation will continue...${CL}"
msg_warn "Network configured (IP: $ip_in_lxc) but connectivity test failed - installation will continue"
else
msg_ok "Network in LXC is reachable (ping)"
fi
@@ -4004,6 +4020,16 @@ EOF
# install_gpu_userland "NVIDIA"
# fi
# Disable error trap for entire customization & install phase.
# All errors are handled explicitly — recovery menu shown on failure.
# Without this, customization errors (e.g. container stopped during base package
# install) would trigger error_handler() with a simple "Remove broken container?"
# prompt instead of the full recovery menu with retry/repair options.
set +Eeuo pipefail
trap - ERR
local install_exit_code=0
# Continue with standard container setup
if [ "$var_os" == "alpine" ]; then
sleep 3
@@ -4011,7 +4037,10 @@ EOF
http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
EOF'
pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq" >>"$BUILD_LOG" 2>&1 || {
msg_error "Failed to install base packages in Alpine container"
install_exit_code=1
}
else
sleep 3
LANG=${LANG:-en_US.UTF-8}
@@ -4034,69 +4063,69 @@ EOF'
msg_warn "Skipping timezone setup zone '$tz' not found in container"
fi
pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
pct exec "$CTID" -- bash -c "apt-get update 2>&1 && apt-get install -y sudo curl mc gnupg2 jq 2>&1" >>"$BUILD_LOG" 2>&1 || {
msg_error "apt-get base packages installation failed"
exit 1
install_exit_code=1
}
fi
msg_ok "Customized LXC Container"
# Only continue with installation if customization succeeded
if [[ $install_exit_code -eq 0 ]]; then
msg_ok "Customized LXC Container"
# Optional DNS override for retry scenarios (inside LXC, never on host)
if [[ "${DNS_RETRY_OVERRIDE:-false}" == "true" ]]; then
msg_info "Applying DNS retry override in LXC (8.8.8.8, 1.1.1.1)"
pct exec "$CTID" -- bash -c "printf 'nameserver 8.8.8.8\nnameserver 1.1.1.1\n' >/etc/resolv.conf" >/dev/null 2>&1 || true
msg_ok "DNS override applied in LXC"
fi
# Install SSH keys
install_ssh_keys_into_ct
# Start timer for duration tracking
start_install_timer
# Run application installer
# Disable error trap - container errors are handled internally via flag file
set +Eeuo pipefail # Disable ALL error handling temporarily
trap - ERR # Remove ERR trap completely
# Signal handlers use this flag to stop the container on abort (SIGHUP/SIGINT/SIGTERM)
# Without this, SSH disconnects leave the container running as an orphan process
# that sends "configuring" status AFTER the host already reported "failed"
export CONTAINER_INSTALLING=true
# Capture lxc-attach terminal output to host-side log via tee.
# This is the ONLY reliable way to get install output when:
# - install.func fails to load (DNS error) → no container-side logging
# - install script crashes before logging starts
# - $STD/silent() not used for some commands
# PIPESTATUS[0] gets the real exit code from lxc-attach (not from tee).
local _LXC_CAPTURE_LOG="/tmp/.install-capture-${SESSION_ID}.log"
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" 2>&1 | tee "$_LXC_CAPTURE_LOG"
local lxc_exit=${PIPESTATUS[0]}
unset CONTAINER_INSTALLING
# Keep error handling DISABLED during failure detection and recovery
# Re-enabling it here would cause any pct exec/pull failure to trigger
# error_handler() on the host, bypassing the recovery menu entirely
# Check for error flag file in container (more reliable than lxc-attach exit code)
local install_exit_code=0
if [[ -n "${SESSION_ID:-}" ]]; then
local error_flag="/root/.install-${SESSION_ID}.failed"
if pct exec "$CTID" -- test -f "$error_flag" 2>/dev/null; then
install_exit_code=$(pct exec "$CTID" -- cat "$error_flag" 2>/dev/null || echo "1")
pct exec "$CTID" -- rm -f "$error_flag" 2>/dev/null || true
# Optional DNS override for retry scenarios (inside LXC, never on host)
if [[ "${DNS_RETRY_OVERRIDE:-false}" == "true" ]]; then
msg_info "Applying DNS retry override in LXC (8.8.8.8, 1.1.1.1)"
pct exec "$CTID" -- bash -c "printf 'nameserver 8.8.8.8\nnameserver 1.1.1.1\n' >/etc/resolv.conf" >/dev/null 2>&1 || true
msg_ok "DNS override applied in LXC"
fi
fi
# Fallback to lxc-attach exit code if no flag file
if [[ $install_exit_code -eq 0 && $lxc_exit -ne 0 ]]; then
install_exit_code=$lxc_exit
fi
# Install SSH keys
install_ssh_keys_into_ct
# Installation failed?
# Start timer for duration tracking
start_install_timer
# Run application installer
# Error handling already disabled above (before customization phase)
# Signal handlers use this flag to stop the container on abort (SIGHUP/SIGINT/SIGTERM)
# Without this, SSH disconnects leave the container running as an orphan process
# that sends "configuring" status AFTER the host already reported "failed"
export CONTAINER_INSTALLING=true
# Capture lxc-attach terminal output to host-side log via tee.
# This is the ONLY reliable way to get install output when:
# - install.func fails to load (DNS error) → no container-side logging
# - install script crashes before logging starts
# - $STD/silent() not used for some commands
# PIPESTATUS[0] gets the real exit code from lxc-attach (not from tee).
local _LXC_CAPTURE_LOG="/tmp/.install-capture-${SESSION_ID}.log"
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" 2>&1 | tee "$_LXC_CAPTURE_LOG"
local lxc_exit=${PIPESTATUS[0]}
unset CONTAINER_INSTALLING
# Keep error handling DISABLED during failure detection and recovery
# Re-enabling it here would cause any pct exec/pull failure to trigger
# error_handler() on the host, bypassing the recovery menu entirely
# Check for error flag file in container (more reliable than lxc-attach exit code)
if [[ -n "${SESSION_ID:-}" ]]; then
local error_flag="/root/.install-${SESSION_ID}.failed"
if pct exec "$CTID" -- test -f "$error_flag" 2>/dev/null; then
install_exit_code=$(pct exec "$CTID" -- cat "$error_flag" 2>/dev/null || echo "1")
pct exec "$CTID" -- rm -f "$error_flag" 2>/dev/null || true
fi
fi
# Fallback to lxc-attach exit code if no flag file
if [[ $install_exit_code -eq 0 && ${lxc_exit:-0} -ne 0 ]]; then
install_exit_code=${lxc_exit:-0}
fi
fi # end: if [[ $install_exit_code -eq 0 ]] (customization succeeded)
# Installation or customization failed?
if [[ $install_exit_code -ne 0 ]]; then
# Prevent job-control signals from suspending the script during recovery.
# In non-interactive shells (bash -c), background processes (spinner) can
@@ -4208,7 +4237,7 @@ EOF'
pct enter "$CTID"
echo ""
echo -en "${YW}Container ${CTID} still running. Remove now? (y/N): ${CL}"
if read -r response && [[ "$response" =~ ^[Yy]$ ]]; then
if read -r response </dev/tty && [[ "$response" =~ ^[Yy]$ ]]; then
pct stop "$CTID" &>/dev/null || true
pct destroy "$CTID" &>/dev/null || true
msg_ok "Container ${CTID} removed"
@@ -4358,7 +4387,7 @@ EOF'
echo ""
echo -en "${YW}Select option [1-${max_option}] (default: 1, auto-remove in 60s): ${CL}"
if read -t 60 -r response; then
if read -t 60 -r response </dev/tty; then
case "${response:-1}" in
1)
# Remove container
@@ -4575,7 +4604,7 @@ destroy_lxc() {
trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT
local prompt
if ! read -rp "Remove this Container? <y/N> " prompt; then
if ! read -rp "Remove this Container? <y/N> " prompt </dev/tty; then
# read returns non-zero on Ctrl-D/ESC
msg_error "Aborted input (Ctrl-D/ESC)"
return 130
@@ -4908,16 +4937,16 @@ create_lxc_container() {
return 0
fi
echo
echo "An update for the Proxmox LXC stack is available:"
msg_info "An update for the Proxmox LXC stack is available"
echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}"
echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}"
echo
read -rp "Do you want to upgrade now? [y/N] " _ans
read -rp "Do you want to upgrade now? [y/N] " _ans </dev/tty
case "${_ans,,}" in
y | yes)
msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
if $STD apt-get update && $STD apt-get install -y --only-upgrade pve-container lxc-pve; then
apt_update_safe
if $STD apt-get install -y --only-upgrade pve-container lxc-pve; then
msg_ok "LXC stack upgraded."
if [[ "$do_retry" == "yes" ]]; then
msg_info "Retrying container creation after upgrade"
@@ -4961,7 +4990,6 @@ create_lxc_container() {
exit 205
}
if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
echo -e "ID '$CTID' is already in use."
unset CTID
msg_error "Cannot use ID that is already in use."
exit 206
@@ -5019,17 +5047,40 @@ create_lxc_container() {
msg_info "Validating storage '$CONTAINER_STORAGE'"
STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 | head -1)
if [[ -z "$STORAGE_TYPE" ]]; then
msg_error "Storage '$CONTAINER_STORAGE' not found in /etc/pve/storage.cfg"
exit 213
fi
case "$STORAGE_TYPE" in
iscsidirect) exit 212 ;;
iscsi | zfs) exit 213 ;;
cephfs) exit 219 ;;
pbs) exit 224 ;;
iscsidirect)
msg_error "Storage '$CONTAINER_STORAGE' uses iSCSI-direct which does not support container rootfs."
exit 212
;;
iscsi | zfs)
msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) does not support container rootdir content."
exit 213
;;
cephfs)
msg_error "Storage '$CONTAINER_STORAGE' uses CephFS which is not supported for LXC rootfs."
exit 219
;;
pbs)
msg_error "Storage '$CONTAINER_STORAGE' is a Proxmox Backup Server — cannot be used for containers."
exit 224
;;
linstor | rbd | nfs | cifs)
pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null || exit 217
if ! pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null; then
msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) is not accessible or inactive."
exit 217
fi
;;
esac
pvesm status -content rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$CONTAINER_STORAGE" || exit 213
if ! pvesm status -content rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$CONTAINER_STORAGE"; then
msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) does not support 'rootdir' content."
exit 213
fi
msg_ok "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) validated"
msg_info "Validating template storage '$TEMPLATE_STORAGE'"
@@ -5102,8 +5153,7 @@ create_lxc_container() {
# If still no template, try to find alternatives
if [[ -z "$TEMPLATE" ]]; then
echo ""
echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
msg_warn "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
# Get all available versions for this OS type
AVAILABLE_VERSIONS=()
@@ -5123,7 +5173,7 @@ create_lxc_container() {
echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
done
echo ""
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice </dev/tty
if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
@@ -5186,7 +5236,7 @@ create_lxc_container() {
done
echo ""
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice </dev/tty
if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
@@ -5296,7 +5346,7 @@ create_lxc_container() {
[[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
for attempt in {1..3}; do
msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1; then
msg_ok "Template download successful."
break
fi
@@ -5373,17 +5423,35 @@ create_lxc_container() {
LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log"
# Helper: append pct_create log to BUILD_LOG before exit so combined log has full context
_flush_pct_log() {
if [[ -s "${LOGFILE:-}" && -n "${BUILD_LOG:-}" ]]; then
{
echo ""
echo "--- pct create output (${LOGFILE}) ---"
cat "$LOGFILE"
echo "--- end pct create output ---"
} >>"$BUILD_LOG" 2>/dev/null || true
fi
}
# Validate template before pct create (while holding lock)
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH" 2>/dev/null || echo 0)" -lt 1000000 ]]; then
msg_info "Template file missing or too small downloading"
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || {
msg_error "Failed to download template '$TEMPLATE' to storage '$TEMPLATE_STORAGE'"
exit 222
}
msg_ok "Template downloaded"
elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_info "Template appears corrupted re-downloading"
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 || {
msg_error "Failed to re-download template '$TEMPLATE'"
exit 222
}
msg_ok "Template re-downloaded"
else
msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
@@ -5404,7 +5472,7 @@ create_lxc_container() {
if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then
msg_info "Template may be corrupted re-downloading"
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1
msg_ok "Template re-downloaded"
fi
@@ -5417,7 +5485,7 @@ create_lxc_container() {
if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
msg_ok "Trying local storage fallback"
msg_info "Downloading template to local"
pveam download local "$TEMPLATE" >/dev/null 2>&1
pveam download local "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1
msg_ok "Template downloaded to local"
else
msg_ok "Trying local storage fallback"
@@ -5425,20 +5493,19 @@ create_lxc_container() {
if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then
# Local fallback also failed - check for LXC stack version issue
if grep -qiE 'unsupported .* version' "$LOGFILE"; then
echo
echo "pct reported 'unsupported ... version' your LXC stack might be too old for this template."
echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
msg_warn "pct reported 'unsupported version' LXC stack might be too old for this template"
offer_lxc_stack_upgrade_and_maybe_retry "yes"
rc=$?
case $rc in
0) : ;; # success - container created, continue
2)
echo "Upgrade was declined. Please update and re-run:
apt update && apt install --only-upgrade pve-container lxc-pve"
msg_error "Upgrade declined. Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve"
_flush_pct_log
exit 231
;;
3)
echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
msg_error "Upgrade and/or retry failed. Please inspect: $LOGFILE"
_flush_pct_log
exit 231
;;
esac
@@ -5449,6 +5516,7 @@ create_lxc_container() {
pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE"
set +x
fi
_flush_pct_log
exit 209
fi
else
@@ -5457,20 +5525,19 @@ create_lxc_container() {
else
# Already on local storage and still failed - check LXC stack version
if grep -qiE 'unsupported .* version' "$LOGFILE"; then
echo
echo "pct reported 'unsupported ... version' your LXC stack might be too old for this template."
echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
msg_warn "pct reported 'unsupported version' LXC stack might be too old for this template"
offer_lxc_stack_upgrade_and_maybe_retry "yes"
rc=$?
case $rc in
0) : ;; # success - container created, continue
2)
echo "Upgrade was declined. Please update and re-run:
apt update && apt install --only-upgrade pve-container lxc-pve"
msg_error "Upgrade declined. Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve"
_flush_pct_log
exit 231
;;
3)
echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
msg_error "Upgrade and/or retry failed. Please inspect: $LOGFILE"
_flush_pct_log
exit 231
;;
esac
@@ -5481,6 +5548,7 @@ create_lxc_container() {
pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE"
set +x
fi
_flush_pct_log
exit 209
fi
fi
@@ -5492,16 +5560,28 @@ create_lxc_container() {
# Verify container exists
pct list | awk '{print $1}' | grep -qx "$CTID" || {
msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
_flush_pct_log
exit 215
}
# Verify config rootfs
grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
msg_error "RootFS entry missing in container config. See $LOGFILE"
_flush_pct_log
exit 216
}
msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
# Append pct create log to BUILD_LOG for combined log visibility
if [[ -s "$LOGFILE" && -n "${BUILD_LOG:-}" ]]; then
{
echo ""
echo "--- pct create output ---"
cat "$LOGFILE"
echo "--- end pct create output ---"
} >>"$BUILD_LOG" 2>/dev/null || true
fi
}
# ==============================================================================

View File

@@ -276,7 +276,7 @@ shell_check() {
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
echo -e "\nExiting..."
sleep 2
exit
exit 1
fi
}
@@ -293,7 +293,7 @@ root_check() {
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit
exit 1
fi
}
@@ -345,11 +345,10 @@ pve_check() {
# ------------------------------------------------------------------------------
arch_check() {
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
echo -e "Exiting..."
msg_error "This script will not work with PiMox (ARM architecture detected)."
msg_warn "Visit https://github.com/asylumexp/Proxmox for ARM64 support."
sleep 2
exit
exit 1
fi
}
@@ -530,7 +529,9 @@ silent() {
if [[ $rc -ne 0 ]]; then
# Source explain_exit_code if needed
if ! declare -f explain_exit_code >/dev/null 2>&1; then
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func)
if ! source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func); then
explain_exit_code() { echo "unknown (error_handler.func download failed)"; }
fi
fi
local explanation
@@ -551,6 +552,53 @@ silent() {
fi
}
# ------------------------------------------------------------------------------
# apt_update_safe()
#
# - Runs apt-get update with graceful error handling
# - On failure: shows warning with common causes instead of aborting
# - Logs full output to active log file
# - Returns 0 even on failure so the caller can continue
# - Typical cause: enterprise repos returning 401 Unauthorized
#
# Usage:
# apt_update_safe # Warn on failure, continue without aborting
# ------------------------------------------------------------------------------
apt_update_safe() {
local logfile
logfile="$(get_active_logfile)"
local _restore_errexit=false
[[ "$-" == *e* ]] && _restore_errexit=true
set +Eeuo pipefail
trap - ERR
apt-get update >>"$logfile" 2>&1
local rc=$?
if $_restore_errexit; then
set -Eeuo pipefail
trap 'error_handler' ERR
fi
if [[ $rc -ne 0 ]]; then
msg_warn "apt-get update exited with code ${rc} — some repositories may have failed."
# Check log for common 401/403 enterprise repo issues
if grep -qiE '401\s*Unauthorized|403\s*Forbidden|enterprise\.proxmox\.com' "$logfile" 2>/dev/null; then
echo -e "${TAB}${INFO} ${YWB}Hint: Proxmox enterprise repository returned an auth error.${CL}"
echo -e "${TAB} If you don't have a subscription, you can disable the enterprise"
echo -e "${TAB} repo and use the no-subscription repo instead."
fi
echo -e "${TAB}${INFO} ${YWB}Continuing despite partial update failure — packages may still be installable.${CL}"
echo ""
fi
return 0
}
# ------------------------------------------------------------------------------
# spinner()
#
@@ -785,8 +833,8 @@ fatal() {
# ------------------------------------------------------------------------------
exit_script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit
msg_error "User exited script"
exit 0
}
# ------------------------------------------------------------------------------
@@ -807,6 +855,7 @@ get_header() {
if [ ! -s "$local_header_path" ]; then
if ! curl -fsSL "$header_url" -o "$local_header_path"; then
msg_warn "Failed to download header: $header_url"
return 1
fi
fi
@@ -847,10 +896,10 @@ header_info() {
ensure_tput() {
if ! command -v tput >/dev/null 2>&1; then
if grep -qi 'alpine' /etc/os-release; then
apk add --no-cache ncurses >/dev/null 2>&1
apk add --no-cache ncurses >/dev/null 2>&1 || msg_warn "Failed to install ncurses (tput may be unavailable)"
elif command -v apt-get >/dev/null 2>&1; then
apt-get update -qq >/dev/null
apt-get install -y -qq ncurses-bin >/dev/null 2>&1
apt-get install -y -qq ncurses-bin >/dev/null 2>&1 || msg_warn "Failed to install ncurses-bin (tput may be unavailable)"
fi
fi
}
@@ -1310,6 +1359,7 @@ prompt_select() {
# Validate options
if [[ $num_options -eq 0 ]]; then
msg_warn "prompt_select called with no options"
echo "" >&2
return 1
fi
@@ -1552,22 +1602,30 @@ check_or_create_swap() {
local swap_size_mb
swap_size_mb=$(prompt_input "Enter swap size in MB (e.g., 2048 for 2GB):" "2048" 60)
if ! [[ "$swap_size_mb" =~ ^[0-9]+$ ]]; then
msg_error "Invalid size input. Aborting."
msg_error "Invalid swap size: '${swap_size_mb}' (must be a number in MB)"
return 1
fi
local swap_file="/swapfile"
msg_info "Creating ${swap_size_mb}MB swap file at $swap_file"
if dd if=/dev/zero of="$swap_file" bs=1M count="$swap_size_mb" status=progress &&
chmod 600 "$swap_file" &&
mkswap "$swap_file" &&
swapon "$swap_file"; then
msg_ok "Swap file created and activated successfully"
else
msg_error "Failed to create or activate swap"
if ! dd if=/dev/zero of="$swap_file" bs=1M count="$swap_size_mb" status=progress; then
msg_error "Failed to allocate swap file (dd failed)"
return 1
fi
if ! chmod 600 "$swap_file"; then
msg_error "Failed to set permissions on $swap_file"
return 1
fi
if ! mkswap "$swap_file"; then
msg_error "Failed to format swap file (mkswap failed)"
return 1
fi
if ! swapon "$swap_file"; then
msg_error "Failed to activate swap (swapon failed)"
return 1
fi
msg_ok "Swap file created and activated successfully"
}
# ------------------------------------------------------------------------------
@@ -1649,7 +1707,7 @@ function get_lxc_ip() {
LOCAL_IP="$(get_current_ip || true)"
if [[ -z "$LOCAL_IP" ]]; then
msg_error "Could not determine LOCAL_IP"
msg_error "Could not determine LOCAL_IP (checked: eth0, hostname -I, ip route, IPv6 targets)"
return 1
fi
fi

View File

@@ -286,7 +286,7 @@ error_handler() {
echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
fi
if read -t 60 -r response; then
if read -t 60 -r response </dev/tty; then
if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then
echo ""
if declare -f msg_info >/dev/null 2>&1; then

View File

@@ -233,7 +233,7 @@ fi
EOF
chmod +x /usr/local/bin/apt-proxy-detect.sh
fi
$STD apt-get update
apt_update_safe
$STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
msg_ok "Updated Container OS"

View File

@@ -201,6 +201,7 @@ install_packages_with_retry() {
fi
done
msg_error "Failed to install packages after $((max_retries + 1)) attempts: ${packages[*]}"
return 1
}
@@ -231,6 +232,7 @@ upgrade_packages_with_retry() {
fi
done
msg_error "Failed to upgrade packages after $((max_retries + 1)) attempts: ${packages[*]}"
return 1
}
@@ -675,6 +677,7 @@ verify_repo_available() {
if curl -fsSL --max-time 10 "${repo_url}/dists/${suite}/Release" &>/dev/null; then
return 0
fi
msg_warn "Repository not available: ${repo_url} (suite: ${suite})"
return 1
}
@@ -839,6 +842,7 @@ github_api_call() {
esac
done
msg_error "GitHub API call failed after ${max_retries} attempts: ${url}"
return 1
}
@@ -900,6 +904,7 @@ codeberg_api_call() {
esac
done
msg_error "Codeberg API call failed after ${max_retries} attempts: ${url}"
return 1
}
@@ -1369,7 +1374,9 @@ setup_deb822_repo() {
[[ -n "$enabled" ]] && echo "Enabled: $enabled"
} >/etc/apt/sources.list.d/${name}.sources
$STD apt update
$STD apt update || {
msg_warn "apt update failed after adding repository: ${name}"
}
}
# ------------------------------------------------------------------------------
@@ -1377,12 +1384,16 @@ setup_deb822_repo() {
# ------------------------------------------------------------------------------
hold_package_version() {
local package="$1"
$STD apt-mark hold "$package"
$STD apt-mark hold "$package" || {
msg_warn "Failed to hold package version: ${package}"
}
}
unhold_package_version() {
local package="$1"
$STD apt-mark unhold "$package"
$STD apt-mark unhold "$package" || {
msg_warn "Failed to unhold package version: ${package}"
}
}
# ------------------------------------------------------------------------------
@@ -1412,6 +1423,7 @@ enable_and_start_service() {
local service="$1"
if ! systemctl enable "$service" &>/dev/null; then
msg_error "Failed to enable service: $service"
return 1
fi
@@ -1454,6 +1466,7 @@ extract_version_from_json() {
version=$(echo "$json" | jq -r ".${field} // empty")
if [[ -z "$version" ]]; then
msg_warn "JSON field '${field}' is empty in API response"
return 1
fi
@@ -1473,8 +1486,9 @@ get_latest_github_release() {
local temp_file=$(mktemp)
if ! github_api_call "https://api.github.com/repos/${repo}/releases/latest" "$temp_file"; then
msg_warn "GitHub API call failed for ${repo}"
rm -f "$temp_file"
return 0
return 1
fi
local version
@@ -1483,7 +1497,7 @@ get_latest_github_release() {
if [[ -z "$version" ]]; then
msg_error "Could not determine latest version for ${repo}"
return 0
return 1
fi
echo "$version"
@@ -1499,8 +1513,9 @@ get_latest_codeberg_release() {
# Codeberg API: get all releases and pick the first non-draft/non-prerelease
if ! codeberg_api_call "https://codeberg.org/api/v1/repos/${repo}/releases" "$temp_file"; then
msg_warn "Codeberg API call failed for ${repo}"
rm -f "$temp_file"
return 0
return 1
fi
local version
@@ -1515,7 +1530,7 @@ get_latest_codeberg_release() {
if [[ -z "$version" ]]; then
msg_error "Could not determine latest version for ${repo}"
return 0
return 1
fi
echo "$version"
@@ -1646,6 +1661,7 @@ get_latest_gh_tag() {
sort -V | tail -n1)
if [[ -z "$latest" ]]; then
msg_warn "No matching tags found for ${repo}${prefix:+ (prefix: $prefix)}"
return 1
fi
@@ -1881,7 +1897,7 @@ check_for_codeberg_release() {
releases_json=$(curl -fsSL --max-time 20 \
-H 'Accept: application/json' \
"https://codeberg.org/api/v1/repos/${source}/releases" 2>/dev/null) || {
msg_error "Unable to fetch releases for ${app}"
msg_error "Unable to fetch releases for ${app} (codeberg.org/api/v1/repos/${source}/releases)"
return 1
}
@@ -2014,12 +2030,12 @@ function download_with_progress() {
if [[ -z "$content_length" ]]; then
if ! curl -fL# -o "$output" "$url"; then
msg_error "Download failed"
msg_error "Download failed: $url"
return 1
fi
else
if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then
msg_error "Download failed"
msg_error "Download failed: $url"
return 1
fi
fi
@@ -2562,7 +2578,10 @@ _gh_scan_older_releases() {
-H 'Accept: application/vnd.github+json' \
-H 'X-GitHub-Api-Version: 2022-11-28' \
"${header[@]}" \
"https://api.github.com/repos/${repo}/releases?per_page=15" 2>/dev/null) || return 1
"https://api.github.com/repos/${repo}/releases?per_page=15" 2>/dev/null) || {
msg_warn "Failed to fetch older releases for ${repo}"
return 1
}
local count
count=$(echo "$releases_list" | jq 'length')
@@ -3104,7 +3123,9 @@ function setup_composer() {
# Scenario 1: Already installed - just self-update
if [[ -n "$INSTALLED_VERSION" ]]; then
msg_info "Update Composer $INSTALLED_VERSION"
$STD "$COMPOSER_BIN" self-update --no-interaction || true
$STD "$COMPOSER_BIN" self-update --no-interaction || {
msg_warn "Composer self-update failed, continuing with current version"
}
local UPDATED_VERSION
UPDATED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
cache_installed_version "composer" "$UPDATED_VERSION"
@@ -3140,7 +3161,9 @@ function setup_composer() {
fi
chmod +x "$COMPOSER_BIN"
$STD "$COMPOSER_BIN" self-update --no-interaction || true
$STD "$COMPOSER_BIN" self-update --no-interaction || {
msg_warn "Composer self-update failed after fresh install"
}
local FINAL_VERSION
FINAL_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
@@ -5223,7 +5246,9 @@ function setup_mysql() {
ensure_apt_working || return 1
# Perform upgrade with retry logic (non-fatal if fails)
upgrade_packages_with_retry "mysql-server" "mysql-client" || true
upgrade_packages_with_retry "mysql-server" "mysql-client" || {
msg_warn "MySQL package upgrade had issues, continuing with current version"
}
cache_installed_version "mysql" "$MYSQL_VERSION"
msg_ok "Update MySQL $MYSQL_VERSION"
@@ -5413,7 +5438,9 @@ function setup_nodejs() {
}
# Force APT cache refresh after repository setup
$STD apt update
$STD apt update || {
msg_warn "apt update failed after Node.js repository setup"
}
ensure_dependencies curl ca-certificates gnupg
@@ -5656,7 +5683,10 @@ EOF
if [[ "$DISTRO_ID" == "ubuntu" ]]; then
# Ubuntu: Use ondrej/php PPA
msg_info "Adding ondrej/php PPA for Ubuntu"
$STD apt install -y software-properties-common
$STD apt install -y software-properties-common || {
msg_error "Failed to install software-properties-common"
return 1
}
# Don't use $STD for add-apt-repository as it uses background processes
add-apt-repository -y ppa:ondrej/php >>"$(get_active_logfile)" 2>&1
else
@@ -5667,7 +5697,9 @@ EOF
}
fi
ensure_apt_working || return 1
$STD apt update
$STD apt update || {
msg_warn "apt update failed after PHP repository setup"
}
# Get available PHP version from repository
local AVAILABLE_PHP_VERSION=""
@@ -5962,7 +5994,9 @@ function setup_postgresql() {
}
fi
$STD systemctl enable --now postgresql 2>/dev/null || true
$STD systemctl enable --now postgresql 2>/dev/null || {
msg_warn "Failed to enable/start PostgreSQL service"
}
# Add PostgreSQL binaries to PATH
if ! grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then
@@ -5976,7 +6010,9 @@ function setup_postgresql() {
if [[ -n "$PG_MODULES" ]]; then
IFS=',' read -ra MODULES <<<"$PG_MODULES"
for module in "${MODULES[@]}"; do
$STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true
$STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || {
msg_warn "Failed to install PostgreSQL module: ${module}"
}
done
fi
}
@@ -6635,7 +6671,9 @@ function setup_clickhouse() {
ensure_apt_working || return 1
# Perform upgrade with retry logic (non-fatal if fails)
upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || true
upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || {
msg_warn "ClickHouse package upgrade had issues, continuing with current version"
}
cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
msg_ok "Update ClickHouse $CLICKHOUSE_VERSION"
return 0
@@ -6770,7 +6808,9 @@ function setup_rust() {
}
# Update to latest patch version
$STD rustup update "$RUST_TOOLCHAIN" </dev/null || true
$STD rustup update "$RUST_TOOLCHAIN" </dev/null || {
msg_warn "Rust toolchain update had issues"
}
# Ensure PATH is updated for current shell session
export PATH="$CARGO_BIN:$PATH"
@@ -7172,7 +7212,10 @@ function setup_docker() {
docker-ce-cli \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin
docker-compose-plugin || {
msg_error "Failed to update Docker packages"
return 1
}
msg_ok "Updated Docker to $DOCKER_LATEST_VERSION"
else
msg_ok "Docker is up-to-date ($DOCKER_CURRENT_VERSION)"
@@ -7184,7 +7227,10 @@ function setup_docker() {
docker-ce-cli \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin
docker-compose-plugin || {
msg_error "Failed to install Docker packages"
return 1
}
DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
msg_ok "Installed Docker $DOCKER_CURRENT_VERSION"

View File

@@ -88,7 +88,7 @@ function truenas_iso_lookup() {
curl -sL "$BASE_URL" |
grep -oE 'href="[^"]+\.iso"' |
sed 's/href="//; s/"$//' |
grep -vE '(nightly|ALPHA)' |
grep -vE '(MASTER|ALPHA)' |
grep -E "$year_pattern"
)