Compare commits


1 Commit

Author: CanbiZ (MickLesk)
SHA1: 6eb083bfa0
Message: fix(frontend): improve detail view badges, addon texts, and HTML title
- Show colored type badges (VM/LXC/PVE/ADDON) in detail view matching list view
- Display PVE and ADDON labels in script cards instead of empty strings
- Show 'Existing LXC or Proxmox Node' as OS for addons instead of generic 'Proxmox Node'
- Hide privileged badge for addon scripts
- Add specific update hint for addons: 'Run update_<slug> to update'
- Set document title to '<ScriptName> | Proxmox VE Helper-Scripts' when a script is selected
Date: 2026-03-02 08:57:39 +01:00
110 changed files with 790 additions and 2269 deletions

View File

@@ -1,255 +0,0 @@
name: Push JSON changes to PocketBase
on:
push:
branches:
- main
paths:
- "frontend/public/json/**"
jobs:
push-json:
runs-on: self-hosted
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get changed JSON files with slug
id: changed
run: |
changed=$(git diff --name-only "${{ github.event.before }}" "${{ github.event.after }}" -- frontend/public/json/ | grep '\.json$' || true)
with_slug=""
for f in $changed; do
[[ -f "$f" ]] || continue
jq -e '.slug' "$f" >/dev/null 2>&1 && with_slug="$with_slug $f"
done
with_slug=$(echo $with_slug | xargs -n1)
if [[ -z "$with_slug" ]]; then
echo "No app JSON files changed (or no files with slug)."
echo "count=0" >> "$GITHUB_OUTPUT"
exit 0
fi
echo "$with_slug" > changed_app_jsons.txt
echo "count=$(echo "$with_slug" | wc -w)" >> "$GITHUB_OUTPUT"
- name: Push to PocketBase
if: steps.changed.outputs.count != '0'
env:
POCKETBASE_URL: ${{ secrets.POCKETBASE_URL }}
POCKETBASE_COLLECTION: ${{ secrets.POCKETBASE_COLLECTION }}
POCKETBASE_ADMIN_EMAIL: ${{ secrets.POCKETBASE_ADMIN_EMAIL }}
POCKETBASE_ADMIN_PASSWORD: ${{ secrets.POCKETBASE_ADMIN_PASSWORD }}
run: |
node << 'ENDSCRIPT'
(async function() {
const fs = require('fs');
const https = require('https');
const http = require('http');
const url = require('url');
function request(fullUrl, opts) {
return new Promise(function(resolve, reject) {
const u = url.parse(fullUrl);
const isHttps = u.protocol === 'https:';
const body = opts.body;
const options = {
hostname: u.hostname,
port: u.port || (isHttps ? 443 : 80),
path: u.path,
method: opts.method || 'GET',
headers: opts.headers || {}
};
if (body) options.headers['Content-Length'] = Buffer.byteLength(body);
const lib = isHttps ? https : http;
const req = lib.request(options, function(res) {
let data = '';
res.on('data', function(chunk) { data += chunk; });
res.on('end', function() {
resolve({ ok: res.statusCode >= 200 && res.statusCode < 300, statusCode: res.statusCode, body: data });
});
});
req.on('error', reject);
if (body) req.write(body);
req.end();
});
}
const raw = process.env.POCKETBASE_URL.replace(/\/$/, '');
const apiBase = /\/api$/i.test(raw) ? raw : raw + '/api';
const coll = process.env.POCKETBASE_COLLECTION;
const files = fs.readFileSync('changed_app_jsons.txt', 'utf8').trim().split(/\s+/).filter(Boolean);
const authUrl = apiBase + '/collections/users/auth-with-password';
console.log('Auth URL: ' + authUrl);
const authBody = JSON.stringify({
identity: process.env.POCKETBASE_ADMIN_EMAIL,
password: process.env.POCKETBASE_ADMIN_PASSWORD
});
const authRes = await request(authUrl, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: authBody
});
if (!authRes.ok) {
throw new Error('Auth failed. Tried: ' + authUrl + ' - Verify POST to that URL with body {"identity":"...","password":"..."} works. Response: ' + authRes.body);
}
const token = JSON.parse(authRes.body).token;
const recordsUrl = apiBase + '/collections/' + encodeURIComponent(coll) + '/records';
let categoryIdToName = {};
try {
const metadata = JSON.parse(fs.readFileSync('frontend/public/json/metadata.json', 'utf8'));
(metadata.categories || []).forEach(function(cat) { categoryIdToName[cat.id] = cat.name; });
} catch (e) { console.warn('Could not load metadata.json:', e.message); }
let typeValueToId = {};
let categoryNameToPbId = {};
try {
const typesRes = await request(apiBase + '/collections/z_ref_script_types/records?perPage=500', { headers: { 'Authorization': token } });
if (typesRes.ok) {
const typesData = JSON.parse(typesRes.body);
(typesData.items || []).forEach(function(item) {
if (item.type != null) typeValueToId[item.type] = item.id;
if (item.name != null) typeValueToId[item.name] = item.id;
if (item.value != null) typeValueToId[item.value] = item.id;
});
}
} catch (e) { console.warn('Could not fetch z_ref_script_types:', e.message); }
try {
const catRes = await request(apiBase + '/collections/script_categories/records?perPage=500', { headers: { 'Authorization': token } });
if (catRes.ok) {
const catData = JSON.parse(catRes.body);
(catData.items || []).forEach(function(item) { if (item.name) categoryNameToPbId[item.name] = item.id; });
}
} catch (e) { console.warn('Could not fetch script_categories:', e.message); }
var noteTypeToId = {};
var installMethodTypeToId = {};
var osToId = {};
var osVersionToId = {};
try {
const res = await request(apiBase + '/collections/z_ref_note_types/records?perPage=500', { headers: { 'Authorization': token } });
if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.type != null) noteTypeToId[item.type] = item.id; });
} catch (e) { console.warn('z_ref_note_types:', e.message); }
try {
const res = await request(apiBase + '/collections/z_ref_install_method_types/records?perPage=500', { headers: { 'Authorization': token } });
if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.type != null) installMethodTypeToId[item.type] = item.id; });
} catch (e) { console.warn('z_ref_install_method_types:', e.message); }
try {
const res = await request(apiBase + '/collections/z_ref_os/records?perPage=500', { headers: { 'Authorization': token } });
if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.os != null) osToId[item.os] = item.id; });
} catch (e) { console.warn('z_ref_os:', e.message); }
try {
const res = await request(apiBase + '/collections/z_ref_os_version/records?perPage=500&expand=os', { headers: { 'Authorization': token } });
if (res.ok) {
(JSON.parse(res.body).items || []).forEach(function(item) {
var osName = item.expand && item.expand.os && item.expand.os.os != null ? item.expand.os.os : null;
if (osName != null && item.version != null) osVersionToId[osName + '|' + item.version] = item.id;
});
}
} catch (e) { console.warn('z_ref_os_version:', e.message); }
var notesCollUrl = apiBase + '/collections/script_notes/records';
var installMethodsCollUrl = apiBase + '/collections/script_install_methods/records';
for (const file of files) {
if (!fs.existsSync(file)) continue;
const data = JSON.parse(fs.readFileSync(file, 'utf8'));
if (!data.slug) { console.log('Skipping', file, '(no slug)'); continue; }
var payload = {
name: data.name,
slug: data.slug,
script_created: data.date_created || data.script_created,
script_updated: data.date_created || data.script_updated,
updateable: data.updateable,
privileged: data.privileged,
port: data.interface_port != null ? data.interface_port : data.port,
documentation: data.documentation,
website: data.website,
logo: data.logo,
description: data.description,
config_path: data.config_path,
default_user: (data.default_credentials && data.default_credentials.username) || data.default_user,
default_passwd: (data.default_credentials && data.default_credentials.password) || data.default_passwd,
is_dev: false
};
var resolvedType = typeValueToId[data.type];
if (resolvedType == null && data.type === 'ct') resolvedType = typeValueToId['lxc'];
if (resolvedType) payload.type = resolvedType;
var resolvedCats = (data.categories || []).map(function(n) { return categoryNameToPbId[categoryIdToName[n]]; }).filter(Boolean);
if (resolvedCats.length) payload.categories = resolvedCats;
if (data.version !== undefined) payload.version = data.version;
if (data.changelog !== undefined) payload.changelog = data.changelog;
if (data.screenshots !== undefined) payload.screenshots = data.screenshots;
const filter = "(slug='" + data.slug + "')";
const listRes = await request(recordsUrl + '?filter=' + encodeURIComponent(filter) + '&perPage=1', {
headers: { 'Authorization': token }
});
const list = JSON.parse(listRes.body);
const existingId = list.items && list.items[0] && list.items[0].id;
async function resolveNotesAndInstallMethods(scriptId) {
var noteIds = [];
for (var i = 0; i < (data.notes || []).length; i++) {
var note = data.notes[i];
var typeId = noteTypeToId[note.type];
if (typeId == null) continue;
var postRes = await request(notesCollUrl, {
method: 'POST',
headers: { 'Authorization': token, 'Content-Type': 'application/json' },
body: JSON.stringify({ text: note.text || '', type: typeId })
});
if (postRes.ok) noteIds.push(JSON.parse(postRes.body).id);
}
var installMethodIds = [];
for (var j = 0; j < (data.install_methods || []).length; j++) {
var im = data.install_methods[j];
var typeId = installMethodTypeToId[im.type];
var res = im.resources || {};
var osId = osToId[res.os];
var osVersionKey = (res.os != null && res.version != null) ? res.os + '|' + res.version : null;
var osVersionId = osVersionKey ? osVersionToId[osVersionKey] : null;
var imBody = {
script: scriptId,
resources_cpu: res.cpu != null ? res.cpu : 0,
resources_ram: res.ram != null ? res.ram : 0,
resources_hdd: res.hdd != null ? res.hdd : 0
};
if (typeId) imBody.type = typeId;
if (osId) imBody.os = osId;
if (osVersionId) imBody.os_version = osVersionId;
var imPostRes = await request(installMethodsCollUrl, {
method: 'POST',
headers: { 'Authorization': token, 'Content-Type': 'application/json' },
body: JSON.stringify(imBody)
});
if (imPostRes.ok) installMethodIds.push(JSON.parse(imPostRes.body).id);
}
return { noteIds: noteIds, installMethodIds: installMethodIds };
}
if (existingId) {
var resolved = await resolveNotesAndInstallMethods(existingId);
payload.notes = resolved.noteIds;
payload.install_methods = resolved.installMethodIds;
console.log('Updating', file, '(slug=' + data.slug + ')');
const r = await request(recordsUrl + '/' + existingId, {
method: 'PATCH',
headers: { 'Authorization': token, 'Content-Type': 'application/json' },
body: JSON.stringify(payload)
});
if (!r.ok) throw new Error('PATCH failed: ' + r.body);
} else {
console.log('Creating', file, '(slug=' + data.slug + ')');
const r = await request(recordsUrl, {
method: 'POST',
headers: { 'Authorization': token, 'Content-Type': 'application/json' },
body: JSON.stringify(payload)
});
if (!r.ok) throw new Error('POST failed: ' + r.body);
var scriptId = JSON.parse(r.body).id;
var resolved = await resolveNotesAndInstallMethods(scriptId);
var patchRes = await request(recordsUrl + '/' + scriptId, {
method: 'PATCH',
headers: { 'Authorization': token, 'Content-Type': 'application/json' },
body: JSON.stringify({ install_methods: resolved.installMethodIds, notes: resolved.noteIds })
});
if (!patchRes.ok) throw new Error('PATCH relations failed: ' + patchRes.body);
}
}
console.log('Done.');
})().catch(e => { console.error(e); process.exit(1); });
ENDSCRIPT
shell: bash
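
A quick way to sanity-check the two PocketBase endpoints this workflow depends on is to replay them with curl. This is a minimal sketch, assuming the same secrets are exported in your shell, that POCKETBASE_URL does not already end in /api, and that jq is available; the slug 'tinyauth' is only an example value:

# 1) Authenticate the same way the Node script does and capture the token.
API_BASE="${POCKETBASE_URL%/}/api"
TOKEN=$(curl -fsSL -X POST "$API_BASE/collections/users/auth-with-password" \
  -H 'Content-Type: application/json' \
  -d "{\"identity\":\"$POCKETBASE_ADMIN_EMAIL\",\"password\":\"$POCKETBASE_ADMIN_PASSWORD\"}" | jq -r '.token')

# 2) Look up a record by slug, mirroring the upsert check in the loop above.
curl -fsSL -G "$API_BASE/collections/$POCKETBASE_COLLECTION/records" \
  --data-urlencode "filter=(slug='tinyauth')" \
  --data-urlencode "perPage=1" \
  -H "Authorization: $TOKEN"

If the first call fails, the workflow's own error message applies: verify that a POST to that URL with body {"identity":"...","password":"..."} succeeds before debugging anything else.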

View File

@@ -410,61 +410,20 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-03-03
### 🆕 New Scripts
- Tinyauth: v5 Support & add Debian Version [@MickLesk](https://github.com/MickLesk) ([#12501](https://github.com/community-scripts/ProxmoxVE/pull/12501))
### 🗑️ Deleted Scripts
- Remove Unifi Network Server scripts (dead APT repo) [@Copilot](https://github.com/Copilot) ([#12500](https://github.com/community-scripts/ProxmoxVE/pull/12500))
### 🌐 Website
- #### 🐞 Bug Fixes
- Revert #11534 PR that messed up search [@BramSuurdje](https://github.com/BramSuurdje) ([#12492](https://github.com/community-scripts/ProxmoxVE/pull/12492))
## 2026-03-02
### 🆕 New Scripts
- PowerDNS ([#12481](https://github.com/community-scripts/ProxmoxVE/pull/12481))
- Profilarr ([#12441](https://github.com/community-scripts/ProxmoxVE/pull/12441))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Tracearr: prepare for imminent v1.4.19 release [@durzo](https://github.com/durzo) ([#12413](https://github.com/community-scripts/ProxmoxVE/pull/12413))
- #### ✨ New Features
- Frigate: Bump to v0.17 [@MickLesk](https://github.com/MickLesk) ([#12474](https://github.com/community-scripts/ProxmoxVE/pull/12474))
- #### 💥 Breaking Changes
- Migrate: DokPloy, Komodo, Coolify, Dockge, Runtipi to Addons [@MickLesk](https://github.com/MickLesk) ([#12275](https://github.com/community-scripts/ProxmoxVE/pull/12275))
- #### 🔧 Refactor
- ref: replace generic exit 1 with specific exit codes in ct & install [@MickLesk](https://github.com/MickLesk) ([#12475](https://github.com/community-scripts/ProxmoxVE/pull/12475))
### 💾 Core
- #### ✨ New Features
- tools.func: Improve stability with retry logic, caching, and debug mode [@MickLesk](https://github.com/MickLesk) ([#10351](https://github.com/community-scripts/ProxmoxVE/pull/10351))
- #### 🔧 Refactor
- core: standardize exit codes and add mappings [@MickLesk](https://github.com/MickLesk) ([#12467](https://github.com/community-scripts/ProxmoxVE/pull/12467))
### 🌐 Website
- frontend: improve detail view badges, addon texts, and HTML title [@MickLesk](https://github.com/MickLesk) ([#12461](https://github.com/community-scripts/ProxmoxVE/pull/12461))
## 2026-03-01
### 🚀 Updated Scripts
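
The two refactor entries above (#12467, #12475) are what the paired exit lines throughout this compare touch: one side of each pair uses a generic `exit 1`, the other a specific code. The authoritative mapping lives in the core refactor; the sketch below only records the values visible in this diff, with `/opt/app` as a placeholder path:

# Specific exit codes as they appear in this compare (illustrative, not the full table):
#    64 - invalid user input (version selection)
#   150 - Docker failed to restart
#   153 - backend JAR not found
#   233 - no existing installation found
#   238 - unsupported OS release
#   250 - could not determine latest release
#   252 - no valid compose file found
#   253 - update aborted, manual data migration required
#   254 - interactive confirmation prompt timed out
if [[ ! -d /opt/app ]]; then
  msg_error "No ${APP} Installation Found!"
  exit 233
fi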

View File

@@ -39,7 +39,7 @@ function update_script() {
COMPOSE_FILE=$(find /opt/komodo -maxdepth 1 -type f -name '*.compose.yaml' ! -name 'compose.env' | head -n1)
if [[ -z "$COMPOSE_FILE" ]]; then
msg_error "No valid compose file found in /opt/komodo!"
exit 252
exit 1
fi
$STD docker compose -p komodo -f "$COMPOSE_FILE" --env-file /opt/komodo/compose.env pull
$STD docker compose -p komodo -f "$COMPOSE_FILE" --env-file /opt/komodo/compose.env up -d

View File

@@ -35,20 +35,6 @@ function update_script() {
$STD service tinyauth stop
msg_ok "Service Stopped"
if [[ -f /opt/tinyauth/.env ]] && ! grep -q "^TINYAUTH_" /opt/tinyauth/.env; then
msg_info "Migrating .env to v5 format"
sed -i \
-e 's/^DATABASE_PATH=/TINYAUTH_DATABASE_PATH=/' \
-e 's/^USERS=/TINYAUTH_AUTH_USERS=/' \
-e "s/^USERS='/TINYAUTH_AUTH_USERS='/" \
-e 's/^APP_URL=/TINYAUTH_APPURL=/' \
-e 's/^SECRET=/TINYAUTH_AUTH_SECRET=/' \
-e 's/^PORT=/TINYAUTH_SERVER_PORT=/' \
-e 's/^ADDRESS=/TINYAUTH_SERVER_ADDRESS=/' \
/opt/tinyauth/.env
msg_ok "Migrated .env to v5 format"
fi
msg_info "Updating Tinyauth"
rm -f /opt/tinyauth/tinyauth
curl -fsSL "https://github.com/steveiliop56/tinyauth/releases/download/v${RELEASE}/tinyauth-amd64" -o /opt/tinyauth/tinyauth

View File

@@ -26,7 +26,7 @@ function update_script() {
if [[ ! -d /opt/endurain ]]; then
msg_error "No ${APP} installation found!"
exit 233
exit 1
fi
if check_for_gh_release "endurain" "endurain-project/endurain"; then
msg_info "Stopping Service"

View File

@@ -25,7 +25,7 @@ function update_script() {
check_container_resources
if ! command -v evcc >/dev/null 2>&1; then
msg_error "No ${APP} Installation Found!"
exit 233
exit 1
fi
if [[ -f /etc/apt/sources.list.d/evcc-stable.list ]]; then

View File

@@ -26,7 +26,7 @@ function update_script() {
if ! dpkg -s grafana >/dev/null 2>&1; then
msg_error "No ${APP} Installation Found!"
exit 233
exit 1
fi
if [[ -f /etc/apt/sources.list.d/grafana.list ]] || [[ ! -f /etc/apt/sources.list.d/grafana.sources ]]; then

View File

@@ -1,6 +0,0 @@
____ ____ _ _______
/ __ \____ _ _____ _____/ __ \/ | / / ___/
/ /_/ / __ \ | /| / / _ \/ ___/ / / / |/ /\__ \
/ ____/ /_/ / |/ |/ / __/ / / /_/ / /| /___/ /
/_/ \____/|__/|__/\___/_/ /_____/_/ |_//____/

View File

@@ -1,6 +0,0 @@
____ _____ __
/ __ \_________ / __(_) /___ ___________
/ /_/ / ___/ __ \/ /_/ / / __ `/ ___/ ___/
/ ____/ / / /_/ / __/ / / /_/ / / / /
/_/ /_/ \____/_/ /_/_/\__,_/_/ /_/

View File

@@ -1,6 +0,0 @@
_______ __ __
/_ __(_)___ __ ______ ___ __/ /_/ /_
/ / / / __ \/ / / / __ `/ / / / __/ __ \
/ / / / / / / /_/ / /_/ / /_/ / /_/ / / /
/_/ /_/_/ /_/\__, /\__,_/\__,_/\__/_/ /_/
/____/

ct/headers/unifi (new file, 6 lines)
View File

@@ -0,0 +1,6 @@
__ __ _ _____
/ / / /___ (_) __(_)
/ / / / __ \/ / /_/ /
/ /_/ / / / / / __/ /
\____/_/ /_/_/_/ /_/

View File

@@ -26,7 +26,7 @@ function update_script() {
if [[ ! -f /etc/itsm-ng/config_db.php ]]; then
msg_error "No ${APP} Installation Found!"
exit 233
exit 1
fi
setup_mariadb

View File

@@ -45,7 +45,7 @@ function update_script() {
if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
msg_error "Unable to detect latest Kasm release URL."
exit 250
exit 1
fi
msg_info "Checked for new version"

View File

@@ -43,7 +43,7 @@ function update_script() {
COMPOSE_FILE=$(find /opt/komodo -maxdepth 1 -type f -name '*.compose.yaml' ! -name 'compose.env' | head -n1)
if [[ -z "$COMPOSE_FILE" ]]; then
msg_error "No valid compose file found in /opt/komodo!"
exit 252
exit 1
fi
$STD docker compose -p komodo -f "$COMPOSE_FILE" --env-file /opt/komodo/compose.env pull
$STD docker compose -p komodo -f "$COMPOSE_FILE" --env-file /opt/komodo/compose.env up -d

View File

@@ -26,7 +26,7 @@ function update_script() {
if ! dpkg -s loki >/dev/null 2>&1; then
msg_error "No ${APP} Installation Found!"
exit 233
exit 1
fi
CHOICE=$(msg_menu "Loki Update Options" \

View File

@@ -44,7 +44,7 @@ function update_script() {
echo -e "${TAB}${GATEWAY}${BGN}https://github.com/community-scripts/ProxmoxVE/discussions/9223${CL}"
echo -e ""
msg_custom "⚠️" "Update aborted. Please migrate your data first."
exit 253
exit 1
fi
fi

View File

@@ -1,68 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.powerdns.com/
APP="PowerDNS"
var_tags="${var_tags:-dns}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/poweradmin ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_info "Updating PowerDNS"
$STD apt update
$STD apt install -y --only-upgrade pdns-server pdns-backend-sqlite3
msg_ok "Updated PowerDNS"
if check_for_gh_release "poweradmin" "poweradmin/poweradmin"; then
msg_info "Backing up Configuration"
cp /opt/poweradmin/config/settings.php /opt/poweradmin_settings.php.bak
cp /opt/poweradmin/powerdns.db /opt/poweradmin_powerdns.db.bak
msg_ok "Backed up Configuration"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "poweradmin" "poweradmin/poweradmin" "tarball"
msg_info "Updating Poweradmin"
cp /opt/poweradmin_settings.php.bak /opt/poweradmin/config/settings.php
cp /opt/poweradmin_powerdns.db.bak /opt/poweradmin/powerdns.db
rm -rf /opt/poweradmin/install
rm -f /opt/poweradmin_settings.php.bak /opt/poweradmin_powerdns.db.bak
chown -R www-data:www-data /opt/poweradmin
msg_ok "Updated Poweradmin"
msg_info "Restarting Services"
systemctl restart pdns apache2
msg_ok "Restarted Services"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

View File

@@ -1,85 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/Dictionarry-Hub/profilarr
APP="Profilarr"
var_tags="${var_tags:-arr;radarr;sonarr;config}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/profilarr ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "profilarr" "Dictionarry-Hub/profilarr"; then
msg_info "Stopping Service"
systemctl stop profilarr
msg_ok "Stopped Service"
msg_info "Backing up Data"
if [[ -d /config ]]; then
cp -r /config /opt/profilarr_config_backup
fi
msg_ok "Backed up Data"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "profilarr" "Dictionarry-Hub/profilarr" "tarball"
msg_info "Installing Python Dependencies"
cd /opt/profilarr/backend
$STD uv venv /opt/profilarr/backend/.venv
sed 's/==/>=/g' requirements.txt >requirements-relaxed.txt
$STD uv pip install --python /opt/profilarr/backend/.venv/bin/python -r requirements-relaxed.txt
rm -f requirements-relaxed.txt
msg_ok "Installed Python Dependencies"
msg_info "Building Frontend"
if [[ -d /opt/profilarr/frontend ]]; then
cd /opt/profilarr/frontend
$STD npm install
$STD npm run build
cp -r dist /opt/profilarr/backend/app/static
fi
msg_ok "Built Frontend"
msg_info "Restoring Data"
if [[ -d /opt/profilarr_config_backup ]]; then
mkdir -p /config
cp -r /opt/profilarr_config_backup/. /config/
rm -rf /opt/profilarr_config_backup
fi
msg_ok "Restored Data"
msg_info "Starting Service"
systemctl start profilarr
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:6868${CL}"

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/steveiliop56/tinyauth
APP="Tinyauth"
var_tags="${var_tags:-auth}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/tinyauth ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "tinyauth" "steveiliop56/tinyauth"; then
msg_info "Stopping Service"
systemctl stop tinyauth
msg_ok "Stopped Service"
fetch_and_deploy_gh_release "tinyauth" "steveiliop56/tinyauth" "singlefile" "latest" "/opt/tinyauth" "tinyauth-amd64"
msg_info "Starting Service"
systemctl start tinyauth
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

View File

@@ -75,31 +75,10 @@ if [ -f \$pg_config_file ]; then
fi
fi
systemctl restart postgresql
sudo -u postgres psql -c "ALTER USER tracearr WITH SUPERUSER;"
EOF
chmod +x /data/tracearr/prestart.sh
msg_ok "Updated prestart script"
# check if tailscale is installed
if command -v tailscale >/dev/null 2>&1; then
# Tracearr runs tailscaled in user mode, disable the service.
$STD systemctl disable --now tailscaled
$STD systemctl stop tailscaled
msg_ok "Tailscale already installed"
else
msg_info "Installing tailscale"
setup_deb822_repo \
"tailscale" \
"https://pkgs.tailscale.com/stable/$(get_os_info id)/$(get_os_info codename).noarmor.gpg" \
"https://pkgs.tailscale.com/stable/$(get_os_info id)/" \
"$(get_os_info codename)"
$STD apt install -y tailscale
# Tracearr runs tailscaled in user mode, disable the service.
$STD systemctl disable --now tailscaled
$STD systemctl stop tailscaled
msg_ok "Installed tailscale"
fi
if check_for_gh_release "tracearr" "connorgallopo/Tracearr"; then
msg_info "Stopping Services"
systemctl stop tracearr postgresql redis
@@ -143,8 +122,6 @@ EOF
sed -i "s/^APP_VERSION=.*/APP_VERSION=$(cat /root/.tracearr)/" /data/tracearr/.env
chmod 600 /data/tracearr/.env
chown -R tracearr:tracearr /data/tracearr
mkdir -p /data/backup
chown -R tracearr:tracearr /data/backup
msg_ok "Configured Tracearr"
msg_info "Starting services"

ct/unifi.sh (new file, 47 lines)
View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://ui.com/download/unifi
APP="Unifi"
var_tags="${var_tags:-network;unifi}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /usr/lib/unifi ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
JAVA_VERSION="21" setup_java
msg_info "Updating ${APP}"
$STD apt update --allow-releaseinfo-change
ensure_dependencies unifi
msg_ok "Updated successfully!"
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8443${CL}"

View File

@@ -34,14 +34,14 @@ function update_script() {
msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml."
msg_warn "See: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes"
read -rp "Continue with update? (y to proceed): " -t 30 CONFIRM1 || exit 254
read -rp "Continue with update? (y to proceed): " -t 30 CONFIRM1 || exit 1
[[ "$CONFIRM1" =~ ^[yY]$ ]] || exit 0
echo
msg_warn "Vikunja may not start after the update until you manually adjust the config."
msg_warn "Details: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes"
read -rp "Acknowledge and continue? (y): " -t 30 CONFIRM2 || exit 254
read -rp "Acknowledge and continue? (y): " -t 30 CONFIRM2 || exit 1
[[ "$CONFIRM2" =~ ^[yY]$ ]] || exit 0
fi

View File

@@ -1,10 +1,10 @@
{
"name": "Tinyauth",
"slug": "tinyauth",
"name": "Alpine-Tinyauth",
"slug": "alpine-tinyauth",
"categories": [
6
],
"date_created": "2026-03-03",
"date_created": "2025-05-06",
"type": "ct",
"updateable": true,
"privileged": false,
@@ -17,13 +17,13 @@
"install_methods": [
{
"type": "default",
"script": "ct/tinyauth.sh",
"script": "ct/alpine-tinyauth.sh",
"resources": {
"cpu": 1,
"ram": 512,
"hdd": 4,
"os": "debian",
"version": "13"
"ram": 256,
"hdd": 2,
"os": "alpine",
"version": "3.23"
}
},
{

View File

@@ -1,5 +1,5 @@
{
"generated": "2026-03-03T06:17:56Z",
"generated": "2026-03-02T06:20:15Z",
"versions": [
{
"slug": "2fauth",
@@ -200,9 +200,9 @@
{
"slug": "cleanuparr",
"repo": "Cleanuparr/Cleanuparr",
"version": "v2.7.7",
"version": "v2.7.6",
"pinned": false,
"date": "2026-03-02T13:08:32Z"
"date": "2026-02-27T19:32:02Z"
},
{
"slug": "cloudreve",
@@ -291,9 +291,9 @@
{
"slug": "dispatcharr",
"repo": "Dispatcharr/Dispatcharr",
"version": "v0.20.2",
"version": "v0.20.1",
"pinned": false,
"date": "2026-03-03T01:40:33Z"
"date": "2026-02-26T21:38:19Z"
},
{
"slug": "docmost",
@@ -312,9 +312,9 @@
{
"slug": "domain-monitor",
"repo": "Hosteroid/domain-monitor",
"version": "v1.1.4",
"version": "v1.1.3",
"pinned": false,
"date": "2026-03-02T09:25:01Z"
"date": "2026-02-11T15:48:18Z"
},
{
"slug": "donetick",
@@ -382,9 +382,9 @@
{
"slug": "firefly",
"repo": "firefly-iii/firefly-iii",
"version": "v6.5.2",
"version": "v6.5.1",
"pinned": false,
"date": "2026-03-03T05:42:27Z"
"date": "2026-02-27T20:55:55Z"
},
{
"slug": "fladder",
@@ -424,9 +424,9 @@
{
"slug": "frigate",
"repo": "blakeblackshear/frigate",
"version": "v0.17.0",
"version": "v0.16.4",
"pinned": true,
"date": "2026-02-27T03:03:01Z"
"date": "2026-01-29T00:42:14Z"
},
{
"slug": "gatus",
@@ -452,9 +452,9 @@
{
"slug": "gitea-mirror",
"repo": "RayLabsHQ/gitea-mirror",
"version": "v3.11.0",
"version": "v3.10.1",
"pinned": false,
"date": "2026-03-02T10:19:59Z"
"date": "2026-03-01T03:08:07Z"
},
{
"slug": "glance",
@@ -585,9 +585,9 @@
{
"slug": "immich-public-proxy",
"repo": "alangrainger/immich-public-proxy",
"version": "v1.15.4",
"version": "v1.15.3",
"pinned": false,
"date": "2026-03-02T21:28:06Z"
"date": "2026-02-16T22:54:27Z"
},
{
"slug": "inspircd",
@@ -613,9 +613,9 @@
{
"slug": "jackett",
"repo": "Jackett/Jackett",
"version": "v0.24.1261",
"version": "v0.24.1247",
"pinned": false,
"date": "2026-03-03T05:54:20Z"
"date": "2026-03-02T05:56:37Z"
},
{
"slug": "jellystat",
@@ -634,9 +634,9 @@
{
"slug": "jotty",
"repo": "fccview/jotty",
"version": "1.21.0",
"version": "1.20.0",
"pinned": false,
"date": "2026-03-02T11:08:54Z"
"date": "2026-02-12T09:23:30Z"
},
{
"slug": "kapowarr",
@@ -872,9 +872,9 @@
{
"slug": "metube",
"repo": "alexta69/metube",
"version": "2026.03.02",
"version": "2026.02.27",
"pinned": false,
"date": "2026-03-02T19:19:10Z"
"date": "2026-02-27T11:47:02Z"
},
{
"slug": "miniflux",
@@ -1149,13 +1149,6 @@
"pinned": false,
"date": "2026-02-23T19:50:48Z"
},
{
"slug": "powerdns",
"repo": "poweradmin/poweradmin",
"version": "v4.0.7",
"pinned": false,
"date": "2026-02-15T20:09:48Z"
},
{
"slug": "privatebin",
"repo": "PrivateBin/PrivateBin",
@@ -1163,13 +1156,6 @@
"pinned": false,
"date": "2025-11-12T07:10:14Z"
},
{
"slug": "profilarr",
"repo": "Dictionarry-Hub/profilarr",
"version": "v1.1.4",
"pinned": false,
"date": "2026-01-29T14:57:25Z"
},
{
"slug": "projectsend",
"repo": "projectsend/projectsend",
@@ -1229,9 +1215,9 @@
{
"slug": "pulse",
"repo": "rcourtman/Pulse",
"version": "v5.1.17",
"version": "v5.1.16",
"pinned": false,
"date": "2026-03-02T20:15:31Z"
"date": "2026-03-01T23:13:09Z"
},
{
"slug": "pve-scripts-local",
@@ -1355,9 +1341,9 @@
{
"slug": "scanopy",
"repo": "scanopy/scanopy",
"version": "v0.14.11",
"version": "v0.14.10",
"pinned": false,
"date": "2026-03-02T08:48:42Z"
"date": "2026-02-28T21:05:12Z"
},
{
"slug": "scraparr",
@@ -1432,9 +1418,9 @@
{
"slug": "snowshare",
"repo": "TuroYT/snowshare",
"version": "v1.3.8",
"version": "v1.3.7",
"pinned": false,
"date": "2026-03-02T07:43:42Z"
"date": "2026-02-23T15:51:39Z"
},
{
"slug": "sonarr",
@@ -1670,9 +1656,9 @@
{
"slug": "victoriametrics",
"repo": "VictoriaMetrics/VictoriaMetrics",
"version": "v1.137.0",
"version": "v1.136.0",
"pinned": false,
"date": "2026-03-02T10:09:29Z"
"date": "2026-02-16T13:17:50Z"
},
{
"slug": "vikunja",
@@ -1698,9 +1684,9 @@
{
"slug": "wanderer",
"repo": "meilisearch/meilisearch",
"version": "v1.37.0",
"version": "v1.36.0",
"pinned": false,
"date": "2026-03-02T09:16:36Z"
"date": "2026-02-23T08:13:32Z"
},
{
"slug": "warracker",
@@ -1733,9 +1719,9 @@
{
"slug": "wealthfolio",
"repo": "afadil/wealthfolio",
"version": "v3.0.2",
"version": "v3.0.0",
"pinned": false,
"date": "2026-03-03T05:01:49Z"
"date": "2026-02-24T22:37:05Z"
},
{
"slug": "web-check",
@@ -1803,9 +1789,9 @@
{
"slug": "zigbee2mqtt",
"repo": "Koenkk/zigbee2mqtt",
"version": "2.9.1",
"version": "2.9.0",
"pinned": false,
"date": "2026-03-02T11:16:46Z"
"date": "2026-03-01T13:58:14Z"
},
{
"slug": "zipline",
@@ -1817,9 +1803,9 @@
{
"slug": "zitadel",
"repo": "zitadel/zitadel",
"version": "v4.12.0",
"version": "v4.11.1",
"pinned": false,
"date": "2026-03-02T08:16:10Z"
"date": "2026-02-25T06:13:13Z"
},
{
"slug": "zoraxy",

View File

@@ -1,40 +0,0 @@
{
"name": "PowerDNS",
"slug": "powerdns",
"categories": [
5
],
"date_created": "2026-03-02",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 80,
"documentation": "https://doc.powerdns.com/index.html",
"config_path": "/opt/poweradmin/config/settings.php",
"website": "https://www.powerdns.com/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/powerdns.webp",
"description": "The PowerDNS Authoritative Server is a versatile nameserver which supports a large number of backends. These backends can either be plain zone files or be more dynamic in nature. PowerDNS has the concepts of backends. A backend is a datastore that the server will consult that contains DNS records (and some metadata). The backends range from database backends (MySQL, PostgreSQL) and BIND zone files to co-processes and JSON APIs.",
"install_methods": [
{
"type": "default",
"script": "ct/powerdns.sh",
"resources": {
"cpu": 1,
"ram": 1024,
"hdd": 4,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "For administrator credentials type: `cat ~/poweradmin.creds` inside LXC.",
"type": "info"
}
]
}

View File

@@ -1,35 +0,0 @@
{
"name": "Profilarr",
"slug": "profilarr",
"categories": [
14
],
"date_created": "2026-03-02",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 6868,
"documentation": "https://github.com/Dictionarry-Hub/profilarr#readme",
"website": "https://github.com/Dictionarry-Hub/profilarr",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/profilarr.webp",
"config_path": "/config",
"description": "Profilarr is a configuration management platform for Radarr and Sonarr that simplifies importing, syncing, and managing quality profiles, custom formats, and release profiles.",
"install_methods": [
{
"type": "default",
"script": "ct/profilarr.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@@ -0,0 +1,42 @@
{
"name": "UniFi Network Server",
"slug": "unifi",
"categories": [
4
],
"date_created": "2024-05-02",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8443,
"documentation": "https://help.ui.com/hc/en-us/articles/360012282453-Self-Hosting-a-UniFi-Network-Server",
"website": "https://www.ui.com/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubiquiti-unifi.webp",
"config_path": "",
"description": "UniFi Network Server is a software that helps manage and monitor UniFi networks (Wi-Fi, Ethernet, etc.) by providing an intuitive user interface and advanced features. It allows network administrators to configure, monitor, and upgrade network devices, as well as view network statistics, client devices, and historical events. The aim of the application is to make the management of UniFi networks easier and more efficient.",
"disable": true,
"disable_description": "This script is disabled because UniFi no longer delivers APT packages for Debian systems. The installation relies on APT repositories that are no longer maintained or available. For more details, see: https://github.com/community-scripts/ProxmoxVE/issues/11876",
"install_methods": [
{
"type": "default",
"script": "ct/unifi.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 8,
"os": "debian",
"version": "12"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "For non-AVX CPUs, MongoDB 4.4 is installed. Please note this is a legacy solution that may present security risks and could become unsupported in future updates.",
"type": "warning"
}
]
}

View File

@@ -2,68 +2,42 @@
import type { z } from "zod";
import { githubGist, nord } from "react-syntax-highlighter/dist/esm/styles/hljs";
import { CalendarIcon, Check, Clipboard, Download } from "lucide-react";
import { useCallback, useEffect, useMemo, useState } from "react";
import SyntaxHighlighter from "react-syntax-highlighter";
import { useTheme } from "next-themes";
import { format } from "date-fns";
import { toast } from "sonner";
import Image from "next/image";
import type { Category } from "@/lib/types";
import { DropdownMenu, DropdownMenuContent, DropdownMenuGroup, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu";
import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog";
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert";
import { Calendar } from "@/components/ui/calendar";
import { Textarea } from "@/components/ui/textarea";
import { Button } from "@/components/ui/button";
import { Switch } from "@/components/ui/switch";
import { basePath } from "@/config/site-config";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { fetchCategories } from "@/lib/data";
import { cn } from "@/lib/utils";
import type { Script } from "./_schemas/schemas";
import { ScriptItem } from "../scripts/_components/script-item";
import InstallMethod from "./_components/install-method";
import { ScriptSchema } from "./_schemas/schemas";
import Categories from "./_components/categories";
import Note from "./_components/note";
function search(scripts: Script[], query: string): Script[] {
const queryLower = query.toLowerCase().trim();
const searchWords = queryLower.split(/\s+/).filter(Boolean);
return scripts
.map((script) => {
const nameLower = script.name.toLowerCase();
const descriptionLower = (script.description || "").toLowerCase();
let score = 0;
for (const word of searchWords) {
if (nameLower.includes(word)) {
score += 10;
}
if (descriptionLower.includes(word)) {
score += 5;
}
}
return { script, score };
})
.filter(({ score }) => score > 0)
.sort((a, b) => b.score - a.score)
.slice(0, 20)
.map(({ script }) => script);
}
import { githubGist, nord } from "react-syntax-highlighter/dist/esm/styles/hljs";
import SyntaxHighlighter from "react-syntax-highlighter";
import { ScriptItem } from "../scripts/_components/script-item";
import { DropdownMenu, DropdownMenuContent, DropdownMenuGroup, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu";
import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog";
import { search } from "@/components/command-menu";
import { basePath } from "@/config/site-config";
import Image from "next/image";
import { useTheme } from "next-themes";
const initialScript: Script = {
name: "",
@@ -103,32 +77,32 @@ export default function JSONGenerator() {
const selectedCategoryObj = useMemo(
() => categories.find(cat => cat.id.toString() === selectedCategory),
[categories, selectedCategory],
[categories, selectedCategory]
);
const allScripts = useMemo(
() => categories.flatMap(cat => cat.scripts || []),
[categories],
[categories]
);
const scripts = useMemo(() => {
const query = searchQuery.trim();
const query = searchQuery.trim()
if (query) {
return search(allScripts, query);
return search(allScripts, query)
}
if (selectedCategoryObj) {
return selectedCategoryObj.scripts || [];
return selectedCategoryObj.scripts || []
}
return [];
return []
}, [allScripts, selectedCategoryObj, searchQuery]);
useEffect(() => {
fetchCategories()
.then(setCategories)
.catch(error => console.error("Error fetching categories:", error));
.catch((error) => console.error("Error fetching categories:", error));
}, []);
useEffect(() => {
@@ -148,14 +122,11 @@ export default function JSONGenerator() {
if (updated.type === "pve") {
scriptPath = `tools/pve/${updated.slug}.sh`;
}
else if (updated.type === "addon") {
} else if (updated.type === "addon") {
scriptPath = `tools/addon/${updated.slug}.sh`;
}
else if (method.type === "alpine") {
} else if (method.type === "alpine") {
scriptPath = `${updated.type}/alpine-${updated.slug}.sh`;
}
else {
} else {
scriptPath = `${updated.type}/${updated.slug}.sh`;
}
@@ -174,13 +145,11 @@ export default function JSONGenerator() {
}, []);
const handleCopy = useCallback(() => {
if (!isValid)
toast.warning("JSON schema is invalid. Copying anyway.");
if (!isValid) toast.warning("JSON schema is invalid. Copying anyway.");
navigator.clipboard.writeText(JSON.stringify(script, null, 2));
setIsCopied(true);
setTimeout(() => setIsCopied(false), 2000);
if (isValid)
toast.success("Copied metadata to clipboard");
if (isValid) toast.success("Copied metadata to clipboard");
}, [script]);
const importScript = (script: Script) => {
@@ -197,11 +166,11 @@ export default function JSONGenerator() {
setIsValid(true);
setZodErrors(null);
toast.success("Imported JSON successfully");
}
catch (error) {
} catch (error) {
toast.error("Failed to read or parse the JSON file.");
}
};
}
const handleFileImport = useCallback(() => {
const input = document.createElement("input");
@@ -211,8 +180,7 @@ export default function JSONGenerator() {
input.onchange = (e: Event) => {
const target = e.target as HTMLInputElement;
const file = target.files?.[0];
if (!file)
return;
if (!file) return;
const reader = new FileReader();
reader.onload = (event) => {
@@ -221,8 +189,7 @@ export default function JSONGenerator() {
const parsed = JSON.parse(content);
importScript(parsed);
toast.success("Imported JSON successfully");
}
catch (error) {
} catch (error) {
toast.error("Failed to read the JSON file.");
}
};
@@ -276,10 +243,7 @@ export default function JSONGenerator() {
<div className="mt-2 space-y-1">
{zodErrors.issues.map((error, index) => (
<AlertDescription key={index} className="p-1 text-red-500">
{error.path.join(".")}
{" "}
-
{error.message}
{error.path.join(".")} -{error.message}
</AlertDescription>
))}
</div>
@@ -306,7 +270,7 @@ export default function JSONGenerator() {
onOpenChange={setIsImportDialogOpen}
>
<DialogTrigger asChild>
<DropdownMenuItem onSelect={e => e.preventDefault()}>
<DropdownMenuItem onSelect={(e) => e.preventDefault()}>
Import existing script
</DropdownMenuItem>
</DialogTrigger>
@@ -328,7 +292,7 @@ export default function JSONGenerator() {
<SelectValue placeholder="Category" />
</SelectTrigger>
<SelectContent>
{categories.map(category => (
{categories.map((category) => (
<SelectItem key={category.id} value={category.id.toString()}>
{category.name}
</SelectItem>
@@ -338,44 +302,40 @@ export default function JSONGenerator() {
<Input
placeholder="Search for a script..."
value={searchQuery}
onChange={e => setSearchQuery(e.target.value)}
onChange={(e) => setSearchQuery(e.target.value)}
/>
{!selectedCategory && !searchQuery
? (
<p className="text-muted-foreground text-sm text-center">
Select a category or search for a script
</p>
)
: scripts.length === 0
? (
<p className="text-muted-foreground text-sm text-center">
No scripts found
</p>
)
: (
<div className="grid grid-cols-3 auto-rows-min h-64 overflow-y-auto gap-4">
{scripts.map(script => (
<div
key={script.slug}
className="p-2 border rounded cursor-pointer hover:bg-accent hover:text-accent-foreground"
onClick={() => {
importScript(script);
setIsImportDialogOpen(false);
}}
>
<Image
src={script.logo || `/${basePath}/logo.png`}
alt={script.name}
className="w-full h-12 object-contain mb-2"
width={16}
height={16}
unoptimized
/>
<p className="text-sm text-center">{script.name}</p>
</div>
))}
</div>
)}
{!selectedCategory && !searchQuery ? (
<p className="text-muted-foreground text-sm text-center">
Select a category or search for a script
</p>
) : scripts.length === 0 ? (
<p className="text-muted-foreground text-sm text-center">
No scripts found
</p>
) : (
<div className="grid grid-cols-3 auto-rows-min h-64 overflow-y-auto gap-4">
{scripts.map(script => (
<div
key={script.slug}
className="p-2 border rounded cursor-pointer hover:bg-accent hover:text-accent-foreground"
onClick={() => {
importScript(script);
setIsImportDialogOpen(false);
}}
>
<Image
src={script.logo || `/${basePath}/logo.png`}
alt={script.name}
className="w-full h-12 object-contain mb-2"
width={16}
height={16}
unoptimized
/>
<p className="text-sm text-center">{script.name}</p>
</div>
))}
</div>
)}
</div>
</div>
</DialogContent>
@@ -388,19 +348,15 @@ export default function JSONGenerator() {
<div className="grid grid-cols-2 gap-4">
<div>
<Label>
Name
{" "}
<span className="text-red-500">*</span>
Name <span className="text-red-500">*</span>
</Label>
<Input placeholder="Example" value={script.name} onChange={e => updateScript("name", e.target.value)} />
<Input placeholder="Example" value={script.name} onChange={(e) => updateScript("name", e.target.value)} />
</div>
<div>
<Label>
Slug
{" "}
<span className="text-red-500">*</span>
Slug <span className="text-red-500">*</span>
</Label>
<Input placeholder="example" value={script.slug} onChange={e => updateScript("slug", e.target.value)} />
<Input placeholder="example" value={script.slug} onChange={(e) => updateScript("slug", e.target.value)} />
</div>
</div>
<div>
@@ -410,7 +366,7 @@ export default function JSONGenerator() {
<Input
placeholder="Full logo URL"
value={script.logo || ""}
onChange={e => updateScript("logo", e.target.value || null)}
onChange={(e) => updateScript("logo", e.target.value || null)}
/>
</div>
<div>
@@ -418,28 +374,24 @@ export default function JSONGenerator() {
<Input
placeholder="Path to config file"
value={script.config_path || ""}
onChange={e => updateScript("config_path", e.target.value || "")}
onChange={(e) => updateScript("config_path", e.target.value || "")}
/>
</div>
<div>
<Label>
Description
{" "}
<span className="text-red-500">*</span>
Description <span className="text-red-500">*</span>
</Label>
<Textarea
placeholder="Example"
value={script.description}
onChange={e => updateScript("description", e.target.value)}
onChange={(e) => updateScript("description", e.target.value)}
/>
</div>
<Categories script={script} setScript={setScript} categories={categories} />
<div className="flex gap-2">
<div className="flex flex-col gap-2 w-full">
<Label>
Date Created
{" "}
<span className="text-red-500">*</span>
Date Created <span className="text-red-500">*</span>
</Label>
<Popover>
<PopoverTrigger asChild className="flex-1">
@@ -463,7 +415,7 @@ export default function JSONGenerator() {
</div>
<div className="flex flex-col gap-2 w-full">
<Label>Type</Label>
<Select value={script.type} onValueChange={value => updateScript("type", value)}>
<Select value={script.type} onValueChange={(value) => updateScript("type", value)}>
<SelectTrigger className="flex-1">
<SelectValue placeholder="Type" />
</SelectTrigger>
@@ -478,17 +430,17 @@ export default function JSONGenerator() {
</div>
<div className="w-full flex gap-5">
<div className="flex items-center space-x-2">
<Switch checked={script.updateable} onCheckedChange={checked => updateScript("updateable", checked)} />
<Switch checked={script.updateable} onCheckedChange={(checked) => updateScript("updateable", checked)} />
<label>Updateable</label>
</div>
<div className="flex items-center space-x-2">
<Switch checked={script.privileged} onCheckedChange={checked => updateScript("privileged", checked)} />
<Switch checked={script.privileged} onCheckedChange={(checked) => updateScript("privileged", checked)} />
<label>Privileged</label>
</div>
<div className="flex items-center space-x-2">
<Switch
checked={script.disable || false}
onCheckedChange={checked => updateScript("disable", checked)}
onCheckedChange={(checked) => updateScript("disable", checked)}
/>
<label>Disabled</label>
</div>
@@ -496,14 +448,12 @@ export default function JSONGenerator() {
{script.disable && (
<div>
<Label>
Disable Description
{" "}
<span className="text-red-500">*</span>
Disable Description <span className="text-red-500">*</span>
</Label>
<Textarea
placeholder="Explain why this script is disabled..."
value={script.disable_description || ""}
onChange={e => updateScript("disable_description", e.target.value)}
onChange={(e) => updateScript("disable_description", e.target.value)}
/>
</div>
)}
@@ -511,18 +461,18 @@ export default function JSONGenerator() {
placeholder="Interface Port"
type="number"
value={script.interface_port || ""}
onChange={e => updateScript("interface_port", e.target.value ? Number(e.target.value) : null)}
onChange={(e) => updateScript("interface_port", e.target.value ? Number(e.target.value) : null)}
/>
<div className="flex gap-2">
<Input
placeholder="Website URL"
value={script.website || ""}
onChange={e => updateScript("website", e.target.value || null)}
onChange={(e) => updateScript("website", e.target.value || null)}
/>
<Input
placeholder="Documentation URL"
value={script.documentation || ""}
onChange={e => updateScript("documentation", e.target.value || null)}
onChange={(e) => updateScript("documentation", e.target.value || null)}
/>
</div>
<InstallMethod script={script} setScript={setScript} setIsValid={setIsValid} setZodErrors={setZodErrors} />
@@ -530,20 +480,22 @@ export default function JSONGenerator() {
<Input
placeholder="Username"
value={script.default_credentials.username || ""}
onChange={e =>
onChange={(e) =>
updateScript("default_credentials", {
...script.default_credentials,
username: e.target.value || null,
})}
})
}
/>
<Input
placeholder="Password"
value={script.default_credentials.password || ""}
onChange={e =>
onChange={(e) =>
updateScript("default_credentials", {
...script.default_credentials,
password: e.target.value || null,
})}
})
}
/>
<Note script={script} setScript={setScript} setIsValid={setIsValid} setZodErrors={setZodErrors} />
</form>
@@ -552,7 +504,7 @@ export default function JSONGenerator() {
<Tabs
defaultValue="json"
className="w-full"
onValueChange={value => setCurrentTab(value as "json" | "preview")}
onValueChange={(value) => setCurrentTab(value as "json" | "preview")}
value={currentTab}
>
<TabsList className="grid w-full grid-cols-2">

View File

@@ -1,7 +1,6 @@
import { ArrowRightIcon, Sparkles } from "lucide-react";
import { useRouter } from "next/navigation";
import { ArrowRightIcon, Sparkles } from "lucide-react";
import Image from "next/image";
import Link from "next/link";
import React from "react";
import type { Category, Script } from "@/lib/types";
@@ -22,6 +21,35 @@ import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "./ui/t
import { DialogTitle } from "./ui/dialog";
import { Button } from "./ui/button";
import { Badge } from "./ui/badge";
import Link from "next/link";
export function search(scripts: Script[], query: string): Script[] {
const queryLower = query.toLowerCase().trim();
const searchWords = queryLower.split(/\s+/).filter(Boolean);
return scripts
.map(script => {
const nameLower = script.name.toLowerCase();
const descriptionLower = (script.description || "").toLowerCase();
let score = 0;
for (const word of searchWords) {
if (nameLower.includes(word)) {
score += 10;
}
if (descriptionLower.includes(word)) {
score += 5;
}
}
return { script, score };
})
.filter(({ score }) => score > 0)
.sort((a, b) => b.score - a.score)
.slice(0, 20)
.map(({ script }) => script);
}
export function formattedBadge(type: string) {
switch (type) {
@@ -51,9 +79,11 @@ function getRandomScript(categories: Category[], previouslySelected: Set<string>
}
function CommandMenu() {
const [query, setQuery] = React.useState("");
const [open, setOpen] = React.useState(false);
const [links, setLinks] = React.useState<Category[]>([]);
const [isLoading, setIsLoading] = React.useState(false);
const [results, setResults] = React.useState<Script[]>([]);
const [selectedScripts, setSelectedScripts] = React.useState<Set<string>>(new Set());
const router = useRouter();
@@ -70,6 +100,27 @@ function CommandMenu() {
});
};
React.useEffect(() => {
if (query.trim() === "") {
fetchSortedCategories();
}
else {
const scriptMap = new Map<string, Script>();
for (const category of links) {
for (const script of category.scripts || []) {
if (!scriptMap.has(script.slug)) {
scriptMap.set(script.slug, script);
}
}
}
const uniqueScripts = Array.from(scriptMap.values());
const filteredResults = search(uniqueScripts, query);
setResults(filteredResults);
}
}, [query]);
React.useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
if (e.key === "k" && (e.metaKey || e.ctrlKey)) {
@@ -197,49 +248,46 @@ function CommandMenu() {
<CommandDialog
open={open}
onOpenChange={setOpen}
filter={(value: string, search: string) => {
const searchLower = search.toLowerCase().trim();
if (!searchLower)
return 1;
const valueLower = value.toLowerCase();
const searchWords = searchLower.split(/\s+/).filter(Boolean);
// All search words must appear somewhere in the value (name + description)
const allWordsMatch = searchWords.every((word: string) => valueLower.includes(word));
return allWordsMatch ? 1 : 0;
onOpenChange={(open) => {
setOpen(open);
if (open) {
setQuery("");
setResults([]);
}
}}
>
<DialogTitle className="sr-only">Search scripts</DialogTitle>
<CommandInput placeholder="Search for a script..." />
<CommandInput
placeholder="Search for a script..."
onValueChange={setQuery}
value={query}
/>
<CommandList>
<CommandEmpty>
{isLoading
? (
"Searching..."
)
: (
<div className="flex flex-col items-center justify-center py-6 text-center">
<p className="text-sm text-muted-foreground">No scripts match your search.</p>
<div className="mt-4">
<p className="text-xs text-muted-foreground mb-2">Want to add a new script?</p>
<Button variant="outline" size="sm" asChild>
<Link
href={`https://github.com/community-scripts/${basePath}/tree/main/docs/contribution/GUIDE.md`}
target="_blank"
rel="noopener noreferrer"
>
Documentation
{" "}
<ArrowRightIcon className="ml-2 h-4 w-4" />
</Link>
</Button>
</div>
</div>
)}
{isLoading ? (
"Searching..."
) : (
<div className="flex flex-col items-center justify-center py-6 text-center">
<p className="text-sm text-muted-foreground">No scripts match your search.</p>
<div className="mt-4">
<p className="text-xs text-muted-foreground mb-2">Want to add a new script?</p>
<Button variant="outline" size="sm" asChild>
<Link
href={`https://github.com/community-scripts/${basePath}/tree/main/docs/contribution/GUIDE.md`}
target="_blank"
rel="noopener noreferrer"
>
Documentation <ArrowRightIcon className="ml-2 h-4 w-4" />
</Link>
</Button>
</div>
</div>
)}
</CommandEmpty>
{Object.entries(uniqueScriptsByCategory).map(([categoryName, scripts]) => (
<CommandGroup key={`category:${categoryName}`} heading={categoryName}>
{scripts.map(script => (
{results.length > 0 ? (
<CommandGroup heading="Search Results">
{results.map(script => (
<CommandItem
key={`script:${script.slug}`}
value={`${script.name} ${script.type} ${script.description || ""}`}
@@ -272,7 +320,44 @@ function CommandMenu() {
</CommandItem>
))}
</CommandGroup>
))}
) : ( // When no search results, show all scripts grouped by category
Object.entries(uniqueScriptsByCategory).map(([categoryName, scripts]) => (
<CommandGroup key={`category:${categoryName}`} heading={categoryName}>
{scripts.map(script => (
<CommandItem
key={`script:${script.slug}`}
value={`${script.name} ${script.type} ${script.description || ""}`}
onSelect={() => {
setOpen(false);
router.push(`/scripts?id=${script.slug}`);
}}
tabIndex={0}
aria-label={`Open script ${script.name}`}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
setOpen(false);
router.push(`/scripts?id=${script.slug}`);
}
}}
>
<div className="flex gap-2" onClick={() => setOpen(false)}>
<Image
src={script.logo || `/${basePath}/logo.png`}
onError={e => ((e.currentTarget as HTMLImageElement).src = `/${basePath}/logo.png`)}
unoptimized
width={16}
height={16}
alt=""
className="h-5 w-5"
/>
<span>{script.name}</span>
<span>{formattedBadge(script.type)}</span>
</div>
</CommandItem>
))}
</CommandGroup>
))
)}
</CommandList>
</CommandDialog>
</>

View File

@@ -14,7 +14,7 @@ network_check
update_os
read -r -p "${TAB3}Enter PostgreSQL version (15/16/17): " ver
[[ $ver =~ ^(15|16|17)$ ]] || { echo "Invalid version"; exit 64; }
[[ $ver =~ ^(15|16|17)$ ]] || { echo "Invalid version"; exit 1; }
msg_info "Installing PostgreSQL ${ver}"
$STD apk add --no-cache postgresql${ver} postgresql${ver}-contrib postgresql${ver}-openrc sudo

View File

@@ -36,9 +36,9 @@ msg_ok "Installed Tinyauth"
read -r -p "${TAB3}Enter your Tinyauth subdomain (e.g. https://tinyauth.example.com): " app_url
cat <<EOF >/opt/tinyauth/.env
TINYAUTH_DATABASE_PATH=/opt/tinyauth/database.db
TINYAUTH_AUTH_USERS='${USER}'
TINYAUTH_APPURL=${app_url}
DATABASE_PATH=/opt/tinyauth/database.db
USERS='${USER}'
APP_URL=${app_url}
EOF
msg_info "Creating Service"

View File

@@ -25,7 +25,7 @@ case $version in
;;
*)
msg_error "Invalid JDK version selected. Please enter 8, 11, 17 or 21."
exit 64
exit 1
;;
esac
;;
@@ -39,7 +39,7 @@ case $version in
;;
*)
msg_error "Invalid JDK version selected. Please enter 11, 17 or 21."
exit 64
exit 1
;;
esac
;;
@@ -53,13 +53,13 @@ case $version in
;;
*)
msg_error "Invalid JDK version selected. Please enter 17 or 21."
exit 64
exit 1
;;
esac
;;
*)
msg_error "Invalid Tomcat version selected. Please enter 9, 10.1 or 11."
exit 64
exit 1
;;
esac

View File

@@ -59,7 +59,7 @@ mkdir -p /opt/booklore/dist
JAR_PATH=$(find /opt/booklore/booklore-api/build/libs -maxdepth 1 -type f -name "booklore-api-*.jar" ! -name "*plain*" | head -n1)
if [[ -z "$JAR_PATH" ]]; then
msg_error "Backend JAR not found"
exit 153
exit 1
fi
cp "$JAR_PATH" /opt/booklore/dist/app.jar
msg_ok "Built Backend"

View File

@@ -86,7 +86,7 @@ EOF
msg_ok "Docker TCP socket available on $socket"
else
msg_error "Docker failed to restart. Check journalctl -xeu docker.service"
exit 150
exit 1
fi
fi

View File

@@ -21,7 +21,7 @@ msg_info "Fetching latest EMQX Enterprise version"
LATEST_VERSION=$(curl -fsSL https://www.emqx.com/en/downloads/enterprise | grep -oP '/en/downloads/enterprise/v\K[0-9]+\.[0-9]+\.[0-9]+' | sort -V | tail -n1)
if [[ -z "$LATEST_VERSION" ]]; then
msg_error "Failed to determine latest EMQX version"
exit 250
exit 1
fi
msg_ok "Latest version: v$LATEST_VERSION"

View File

@@ -1,7 +1,8 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Authors: MickLesk (CanbiZ) | Co-Authors: remz1337
# Authors: MickLesk (CanbiZ)
# Co-Authors: remz1337
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/ | Github: https://github.com/blakeblackshear/frigate
@@ -16,7 +17,7 @@ update_os
source /etc/os-release
if [[ "$VERSION_ID" != "12" ]]; then
msg_error "Frigate requires Debian 12 (Bookworm) due to Python 3.11 dependencies"
exit 238
exit 1
fi
msg_info "Converting APT sources to DEB822 format"
@@ -84,7 +85,6 @@ $STD apt install -y \
tclsh \
libopenblas-dev \
liblapack-dev \
libgomp1 \
make \
moreutils
msg_ok "Installed Dependencies"
@@ -101,16 +101,9 @@ export NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
export TOKENIZERS_PARALLELISM=true
export TRANSFORMERS_NO_ADVISORY_WARNINGS=1
export OPENCV_FFMPEG_LOGLEVEL=8
export PYTHONWARNINGS="ignore:::numpy.core.getlimits"
export HAILORT_LOGGER_PATH=NONE
export TF_CPP_MIN_LOG_LEVEL=3
export TF_CPP_MIN_VLOG_LEVEL=3
export TF_ENABLE_ONEDNN_OPTS=0
export AUTOGRAPH_VERBOSITY=0
export GLOG_minloglevel=3
export GLOG_logtostderr=0
fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.17.0" "/opt/frigate"
fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.16.4" "/opt/frigate"
msg_info "Building Nginx"
$STD bash /opt/frigate/docker/main/build_nginx.sh
@@ -145,19 +138,13 @@ install -c -m 644 libusb-1.0.pc /usr/local/lib/pkgconfig
ldconfig
msg_ok "Built libUSB"
msg_info "Bootstrapping pip"
wget -q https://bootstrap.pypa.io/get-pip.py -O /tmp/get-pip.py
sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' /tmp/get-pip.py
$STD python3 /tmp/get-pip.py "pip"
rm -f /tmp/get-pip.py
msg_ok "Bootstrapped pip"
msg_info "Installing Python Dependencies"
$STD pip3 install -r /opt/frigate/docker/main/requirements.txt
msg_ok "Installed Python Dependencies"
msg_info "Building Python Wheels (Patience)"
mkdir -p /wheels
sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh
$STD bash /opt/frigate/docker/main/build_pysqlite3.sh
for i in {1..3}; do
$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break
@@ -165,7 +152,7 @@ for i in {1..3}; do
done
msg_ok "Built Python Wheels"
NODE_VERSION="20" setup_nodejs
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
msg_info "Downloading Inference Models"
mkdir -p /models /openvino-model
@@ -196,10 +183,6 @@ $STD pip3 install -U /wheels/*.whl
ldconfig
msg_ok "Installed HailoRT Runtime"
msg_info "Installing MemryX Runtime"
$STD bash /opt/frigate/docker/main/install_memryx.sh
msg_ok "Installed MemryX Runtime"
msg_info "Installing OpenVino"
$STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt
msg_ok "Installed OpenVino"
@@ -226,8 +209,6 @@ $STD make version
cd /opt/frigate/web
$STD npm install
$STD npm run build
mv /opt/frigate/web/dist/BASE_PATH/monacoeditorwork/* /opt/frigate/web/dist/assets/
rm -rf /opt/frigate/web/dist/BASE_PATH
cp -r /opt/frigate/web/dist/* /opt/frigate/web/
sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
msg_ok "Built Frigate Application"
@@ -243,19 +224,6 @@ echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
cat <<EOF >/etc/frigate.env
DEFAULT_FFMPEG_VERSION="7.0"
INCLUDED_FFMPEG_VERSIONS="7.0:5.0"
NVIDIA_VISIBLE_DEVICES=all
NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
TOKENIZERS_PARALLELISM=true
TRANSFORMERS_NO_ADVISORY_WARNINGS=1
OPENCV_FFMPEG_LOGLEVEL=8
PYTHONWARNINGS="ignore:::numpy.core.getlimits"
HAILORT_LOGGER_PATH=NONE
TF_CPP_MIN_LOG_LEVEL=3
TF_CPP_MIN_VLOG_LEVEL=3
TF_ENABLE_ONEDNN_OPTS=0
AUTOGRAPH_VERBOSITY=0
GLOG_minloglevel=3
GLOG_logtostderr=0
EOF
cat <<EOF >/config/config.yml
@@ -269,6 +237,7 @@ cameras:
input_args: -re -stream_loop -1 -fflags +genpts
roles:
- detect
- rtmp
detect:
height: 1080
width: 1920
@@ -286,7 +255,6 @@ ffmpeg:
detectors:
detector01:
type: openvino
device: AUTO
model:
width: 300
height: 300

View File

@@ -31,7 +31,7 @@ fi
if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
msg_error "Unable to detect latest Kasm release URL."
exit 250
exit 1
fi
msg_ok "Detected Kasm Workspaces version $KASM_VERSION"

View File

@@ -28,7 +28,7 @@ $STD apt install -y \
msg_ok "Installed Dependencies"
PYTHON_VERSION="3.12" setup_uv
PG_VERSION="16" setup_postgresql
POSTGRES_VERSION="16" setup_postgresql
NODE_MODULE="yarn" NODE_VERSION="24" setup_nodejs
fetch_and_deploy_gh_release "mealie" "mealie-recipes/mealie" "tarball" "latest" "/opt/mealie"
PG_DB_NAME="mealie_db" PG_DB_USER="mealie_user" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db

View File

@@ -51,7 +51,7 @@ while true; do
attempts=$((attempts + 1))
if [[ "$attempts" -ge 3 ]]; then
msg_error "Maximum attempts reached. Exiting."
exit 254
exit 1
fi
done
@@ -76,11 +76,11 @@ for i in {1..60}; do
elif [[ "$STATUS" == "unhealthy" ]]; then
msg_error "NPMplus container is unhealthy! Check logs."
docker logs "$CONTAINER_ID"
exit 150
exit 1
fi
fi
sleep 2
[[ $i -eq 60 ]] && msg_error "NPMplus container did not become healthy within 120s." && docker logs "$CONTAINER_ID" && exit 150
[[ $i -eq 60 ]] && msg_error "NPMplus container did not become healthy within 120s." && docker logs "$CONTAINER_ID" && exit 1
done
msg_ok "Builded and started NPMplus"

View File

@@ -78,11 +78,11 @@ if curl -fL# -C - -o "$TMP_TAR" "$OLLAMA_URL"; then
msg_ok "Installed Ollama ${RELEASE}"
else
msg_error "Extraction failed archive corrupt or incomplete"
exit 251
exit 1
fi
else
msg_error "Download failed $OLLAMA_URL not reachable"
exit 250
exit 1
fi
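# The download above leans on curl's resume support; the flags, spelled out
# (a generic sketch, not part of the script):
curl -fL -C - -o "$TMP_TAR" "$OLLAMA_URL" # -f fail on HTTP errors, -L follow redirects, -C - resume a partial file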
msg_info "Creating ollama User and Group"

View File

@@ -59,7 +59,7 @@ EOF
else
msg_error "Failed to download or verify GPG key from $KEY_URL"
[[ -f "$TMP_KEY_CONTENT" ]] && rm -f "$TMP_KEY_CONTENT"
exit 250
exit 1
fi
rm -f "$TMP_KEY_CONTENT"

View File

@@ -34,7 +34,7 @@ for server in "${servers[@]}"; do
done
if ((attempt >= MAX_ATTEMPTS)); then
msg_error "No more attempts - aborting script!"
exit 254
exit 1
fi
done

View File

@@ -16,7 +16,7 @@ update_os
read -r -p "${TAB3}Enter PostgreSQL version (15/16/17/18): " ver
[[ $ver =~ ^(15|16|17|18)$ ]] || {
echo "Invalid version"
exit 64
exit 1
}
PG_VERSION=$ver setup_postgresql
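# PG_VERSION=$ver is a one-shot environment assignment: the variable is visible
# inside setup_postgresql for that single call only. A generic bash sketch:
greet() { echo "hello ${NAME}"; }
NAME=world greet        # prints: hello world
echo "${NAME:-unset}"   # prints: unset (the assignment did not leak into the shell)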

View File

@@ -1,134 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.powerdns.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y sqlite3
msg_ok "Installed Dependencies"
PHP_VERSION="8.3" PHP_APACHE="YES" PHP_FPM="YES" PHP_MODULE="gettext,tokenizer,sqlite3,ldap" setup_php
setup_deb822_repo \
"pdns" \
"https://repo.powerdns.com/FD380FBB-pub.asc" \
"http://repo.powerdns.com/debian" \
"trixie-auth-50"
cat <<EOF >/etc/apt/preferences.d/auth-50
Package: pdns-*
Pin: origin repo.powerdns.com
Pin-Priority: 600
EOF
escape_sql() {
printf '%s' "$1" | sed "s/'/''/g"
}
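# escape_sql doubles single quotes, the standard escaping for SQLite string
# literals. Illustration (hypothetical value):
escape_sql "O'Brien"   # prints: O''Brien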
msg_info "Setting up PowerDNS"
$STD apt install -y \
pdns-server \
pdns-backend-sqlite3
msg_ok "Setup PowerDNS"
fetch_and_deploy_gh_release "poweradmin" "poweradmin/poweradmin" "tarball"
msg_info "Setting up Poweradmin"
sqlite3 /opt/poweradmin/powerdns.db </opt/poweradmin/sql/poweradmin-sqlite-db-structure.sql
sqlite3 /opt/poweradmin/powerdns.db </opt/poweradmin/sql/pdns/49/schema.sqlite3.sql
PA_ADMIN_USERNAME="admin"
PA_ADMIN_EMAIL="admin@example.com"
PA_ADMIN_FULLNAME="Administrator"
PA_ADMIN_PASSWORD=$(openssl rand -base64 16 | tr -d "=+/" | cut -c1-16)
PA_SESSION_KEY=$(openssl rand -base64 75 | tr -dc 'A-Za-z0-9^@#!(){}[]%_\-+=~' | head -c 50)
PASSWORD_HASH=$(php -r "echo password_hash(\$argv[1], PASSWORD_DEFAULT);" -- "${PA_ADMIN_PASSWORD}" 2>/dev/null)
sqlite3 /opt/poweradmin/powerdns.db "INSERT INTO users (username, password, fullname, email, description, perm_templ, active, use_ldap) \
VALUES ('$(escape_sql "${PA_ADMIN_USERNAME}")', '$(escape_sql "${PASSWORD_HASH}")', '$(escape_sql "${PA_ADMIN_FULLNAME}")', \
'$(escape_sql "${PA_ADMIN_EMAIL}")', 'System Administrator', 1, 1, 0);"
cat <<EOF >~/poweradmin.creds
Admin Username: ${PA_ADMIN_USERNAME}
Admin Password: ${PA_ADMIN_PASSWORD}
EOF
cat <<EOF >/opt/poweradmin/config/settings.php
<?php
/**
* Poweradmin Settings Configuration File
*
* Generated by the installer on 2026-02-02 21:01:40
*/
return [
/**
* Database Settings
*/
'database' => [
'type' => 'sqlite',
'file' => '/opt/poweradmin/powerdns.db',
],
/**
* Security Settings
*/
'security' => [
'session_key' => '${PA_SESSION_KEY}',
],
/**
* Interface Settings
*/
'interface' => [
'language' => 'en_EN',
],
/**
* DNS Settings
*/
'dns' => [
'hostmaster' => 'localhost.lan',
'ns1' => '8.8.8.8',
'ns2' => '9.9.9.9',
]
];
EOF
rm -rf /opt/poweradmin/install
msg_ok "Setup Poweradmin"
msg_info "Creating Service"
rm /etc/apache2/sites-enabled/000-default.conf
cat <<EOF >/etc/apache2/sites-enabled/poweradmin.conf
<VirtualHost *:80>
ServerName localhost
DocumentRoot /opt/poweradmin
<Directory /opt/poweradmin>
Options -Indexes +FollowSymLinks
AllowOverride All
Require all granted
</Directory>
# For DDNS update functionality
RewriteEngine On
RewriteRule ^/update(.*)\$ /dynamic_update.php [L]
RewriteRule ^/nic/update(.*)\$ /dynamic_update.php [L]
</VirtualHost>
EOF
$STD a2enmod rewrite headers
chown -R www-data:www-data /opt/poweradmin
$STD systemctl restart apache2
msg_info "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/Dictionarry-Hub/profilarr
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
python3-dev \
libffi-dev \
libssl-dev \
git
msg_ok "Installed Dependencies"
PYTHON_VERSION="3.12" setup_uv
NODE_VERSION="22" setup_nodejs
msg_info "Creating directories"
mkdir -p /opt/profilarr \
/config
msg_ok "Created directories"
fetch_and_deploy_gh_release "profilarr" "Dictionarry-Hub/profilarr" "tarball"
msg_info "Installing Python Dependencies"
cd /opt/profilarr/backend
$STD uv venv /opt/profilarr/backend/.venv
sed 's/==/>=/g' requirements.txt >requirements-relaxed.txt
$STD uv pip install --python /opt/profilarr/backend/.venv/bin/python -r requirements-relaxed.txt
rm -f requirements-relaxed.txt
msg_ok "Installed Python Dependencies"
msg_info "Building Frontend"
cd /opt/profilarr/frontend
$STD npm install
$STD npm run build
cp -r dist /opt/profilarr/backend/app/static
msg_ok "Built Frontend"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/profilarr.service
[Unit]
Description=Profilarr - Configuration Management Platform for Radarr/Sonarr
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/profilarr/backend
Environment="PATH=/opt/profilarr/backend/.venv/bin:/usr/local/bin:/usr/bin:/bin"
Environment="PYTHONPATH=/opt/profilarr/backend"
ExecStart=/opt/profilarr/backend/.venv/bin/gunicorn --bind 0.0.0.0:6868 --timeout 600 app.main:create_app()
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now profilarr
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -25,7 +25,7 @@ if useradd -r -m -d /opt/pulse-home -s /usr/sbin/nologin pulse; then
msg_ok "Created User"
else
msg_error "User creation failed"
exit 71
exit 1
fi
mkdir -p /etc/pulse

View File

@@ -34,7 +34,7 @@ while true; do
[Nn]|[Nn][Oo]|"")
msg_error "Terms not accepted. Installation cannot proceed."
msg_error "Please review the terms and run the script again if you wish to proceed."
exit 254
exit 1
;;
*)
msg_error "Invalid response. Please enter 'y' for yes or 'n' for no."
@@ -47,7 +47,7 @@ DOWNLOAD_URL=$(curl -s "https://www.splunk.com/en_us/download/splunk-enterprise.
RELEASE=$(echo "$DOWNLOAD_URL" | sed 's|.*/releases/\([^/]*\)/.*|\1|')
$STD curl -fsSL -o "splunk-enterprise.tgz" "$DOWNLOAD_URL" || {
msg_error "Failed to download Splunk Enterprise from the provided link."
exit 250
exit 1
}
$STD tar -xzf "splunk-enterprise.tgz" -C /opt
rm -f "splunk-enterprise.tgz"

View File

@@ -1,62 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/steveiliop56/tinyauth
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
openssl \
apache2-utils
msg_ok "Installed Dependencies"
fetch_and_deploy_gh_release "tinyauth" "steveiliop56/tinyauth" "singlefile" "latest" "/opt/tinyauth" "tinyauth-amd64"
msg_info "Setting up Tinyauth"
PASS=$(openssl rand -base64 8 | tr -dc 'a-zA-Z0-9' | head -c 8)
USER=$(htpasswd -Bbn "tinyauth" "${PASS}")
cat <<EOF >/opt/tinyauth/credentials.txt
Tinyauth Credentials
Username: tinyauth
Password: ${PASS}
EOF
msg_ok "Set up Tinyauth"
read -r -p "${TAB3}Enter your Tinyauth subdomain (e.g. https://tinyauth.example.com): " app_url
msg_info "Creating Service"
cat <<EOF >/opt/tinyauth/.env
TINYAUTH_DATABASE_PATH=/opt/tinyauth/database.db
TINYAUTH_AUTH_USERS='${USER}'
TINYAUTH_APPURL=${app_url}
EOF
cat <<EOF >/etc/systemd/system/tinyauth.service
[Unit]
Description=Tinyauth Service
After=network.target
[Service]
Type=simple
EnvironmentFile=/opt/tinyauth/.env
ExecStart=/opt/tinyauth/tinyauth
WorkingDirectory=/opt/tinyauth
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now tinyauth
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -44,20 +44,7 @@ $STD timescaledb-tune -yes -memory "$ram_for_tsdb"MB
$STD systemctl restart postgresql
msg_ok "Installed TimescaleDB"
PG_DB_NAME="tracearr_db" PG_DB_USER="tracearr" PG_DB_EXTENSIONS="timescaledb,timescaledb_toolkit" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db
msg_info "Installing tailscale"
setup_deb822_repo \
"tailscale" \
"https://pkgs.tailscale.com/stable/$(get_os_info id)/$(get_os_info codename).noarmor.gpg" \
"https://pkgs.tailscale.com/stable/$(get_os_info id)/" \
"$(get_os_info codename)"
$STD apt install -y tailscale
# Tracearr runs tailscaled in user mode, so disable the system service.
$STD systemctl disable --now tailscaled
$STD systemctl stop tailscaled
msg_ok "Installed tailscale"
PG_DB_NAME="tracearr_db" PG_DB_USER="tracearr" PG_DB_EXTENSIONS="timescaledb,timescaledb_toolkit" setup_postgresql_db
fetch_and_deploy_gh_release "tracearr" "connorgallopo/Tracearr" "tarball" "latest" "/opt/tracearr.build"
msg_info "Building Tracearr"
@@ -88,7 +75,6 @@ msg_info "Configuring Tracearr"
$STD useradd -r -s /bin/false -U tracearr
$STD chown -R tracearr:tracearr /opt/tracearr
install -d -m 750 -o tracearr -g tracearr /data/tracearr
install -d -m 750 -o tracearr -g tracearr /data/backup
export JWT_SECRET=$(openssl rand -hex 32)
export COOKIE_SECRET=$(openssl rand -hex 32)
cat <<EOF >/data/tracearr/.env
@@ -103,6 +89,7 @@ JWT_SECRET=$JWT_SECRET
COOKIE_SECRET=$COOKIE_SECRET
APP_VERSION=$(cat /root/.tracearr)
#CORS_ORIGIN=http://localhost:5173
#MOBILE_BETA_MODE=true
EOF
chmod 600 /data/tracearr/.env
chown -R tracearr:tracearr /data/tracearr
@@ -153,7 +140,6 @@ if [ -f \$pg_config_file ]; then
fi
fi
systemctl restart postgresql
sudo -u postgres psql -c "ALTER USER tracearr WITH SUPERUSER;"
EOF
chmod +x /data/tracearr/prestart.sh
cat <<EOF >/lib/systemd/system/tracearr.service

install/unifi-install.sh (new file, 53 lines)
View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://ui.com/download/unifi
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y apt-transport-https
curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
Types: deb
URIs: https://www.ui.com/downloads/unifi/debian
Suites: stable
Components: ubiquiti
Architectures: amd64
Signed-By: /usr/share/keyrings/unifi-repo.gpg
EOF
$STD apt update
msg_ok "Installed Dependencies"
JAVA_VERSION="21" setup_java
if lscpu | grep -q 'avx'; then
MONGO_VERSION="8.0" setup_mongodb
else
msg_error "No AVX detected (CPU-Flag)! We have discontinued support for this. You are welcome to try it manually with a Debian LXC, but due to the many issues with Unifi, we currently only support AVX CPUs."
exit 10
fi
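# To inspect the CPU flags this gate greps, a manual check looks like this
# (a sketch; output values are examples):
lscpu | grep -o 'avx[0-9]*' | sort -u   # e.g. "avx avx2"; empty output means no AVX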
if ! dpkg -l | grep -q 'libssl1.1'; then
msg_info "Installing libssl (if needed)"
curl -fsSL "https://security.debian.org/debian-security/pool/updates/main/o/openssl/libssl1.1_1.1.1w-0+deb11u4_amd64.deb" -o "/tmp/libssl.deb"
$STD dpkg -i /tmp/libssl.deb
rm -f /tmp/libssl.deb
msg_ok "Installed libssl1.1"
fi
msg_info "Installing UniFi Network Server"
$STD apt install -y unifi
msg_ok "Installed UniFi Network Server"
motd_ssh
customize
cleanup_lxc

View File

@@ -16,13 +16,13 @@ update_os
if [[ "${CTTYPE:-1}" != "0" ]]; then
msg_error "UniFi OS Server requires a privileged LXC container."
msg_error "Recreate the container with unprivileged=0."
exit 10
exit 1
fi
if [[ ! -e /dev/net/tun ]]; then
msg_error "Missing /dev/net/tun in container."
msg_error "Enable TUN/TAP (var_tun=yes) or add /dev/net/tun passthrough."
exit 236
exit 1
fi
msg_info "Installing dependencies"
@@ -48,7 +48,7 @@ TEMP_JSON="$(mktemp)"
if ! curl -fsSL "$API_URL" -o "$TEMP_JSON"; then
rm -f "$TEMP_JSON"
msg_error "Failed to fetch data from Ubiquiti API"
exit 250
exit 1
fi
LATEST=$(jq -r '
._embedded.firmware
@@ -62,7 +62,7 @@ UOS_URL=$(echo "$LATEST" | jq -r '._links.data.href')
rm -f "$TEMP_JSON"
if [[ -z "$UOS_URL" || -z "$UOS_VERSION" || "$UOS_URL" == "null" ]]; then
msg_error "Failed to parse UniFi OS Server version or download URL"
exit 250
exit 1
fi
msg_ok "Found UniFi OS Server ${UOS_VERSION}"

View File

@@ -31,7 +31,7 @@ if gpg --verify /tmp/zerotier-install.sh >/dev/null 2>&1; then
$STD bash /tmp/zerotier-install.sh
else
msg_warn "Could not verify signature of Zerotier-One install script. Exiting..."
exit 250
exit 1
fi
msg_ok "Setup Zerotier-One"

View File

@@ -79,7 +79,7 @@ setting_up_container() {
if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then
echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
echo -e "${NETWORK}Check Network Settings"
exit 121
exit 1
fi
msg_ok "Set up Container OS"
msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}"
@@ -99,7 +99,7 @@ network_check() {
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
else
echo -e "${NETWORK}Check Network Settings"
exit 122
exit 1
fi
fi
RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
@@ -119,12 +119,12 @@ update_os() {
local tools_content
tools_content=$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) || {
msg_error "Failed to download tools.func"
exit 115
exit 6
}
source /dev/stdin <<<"$tools_content"
if ! declare -f fetch_and_deploy_gh_release >/dev/null 2>&1; then
msg_error "tools.func loaded but incomplete — missing expected functions"
exit 115
exit 6
fi
msg_ok "Updated Container OS"
post_progress_to_api

View File

@@ -124,7 +124,6 @@ detect_repo_source
# * Generic/Shell errors (1-3, 10, 124-132, 134, 137, 139, 141, 143-146)
# * curl/wget errors (4-8, 16, 18, 22-28, 30, 32-36, 39, 44-48, 51-52, 55-57, 59, 61, 63, 75, 78-79, 92, 95)
# * Package manager errors (APT, DPKG: 100-102, 255)
# * Script Validation & Setup (103-123)
# * BSD sysexits (64-78)
# * Systemd/Service errors (150-154)
# * Python/pip/uv errors (160-162)
@@ -132,9 +131,7 @@ detect_repo_source
# * MySQL/MariaDB errors (180-183)
# * MongoDB errors (190-193)
# * Proxmox custom codes (200-231)
# * Tools & Addon Scripts (232-238)
# * Node.js/npm errors (239, 243, 245-249)
# * Application Install/Update errors (250-254)
# - Returns description string for given exit code
# ------------------------------------------------------------------------------
explain_exit_code() {
@@ -192,29 +189,6 @@ explain_exit_code() {
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
# --- Script Validation & Setup (103-123) ---
103) echo "Validation: Shell is not Bash" ;;
104) echo "Validation: Not running as root (or invoked via sudo)" ;;
105) echo "Validation: Proxmox VE version not supported" ;;
106) echo "Validation: Architecture not supported (ARM / PiMox)" ;;
107) echo "Validation: Kernel key parameters unreadable" ;;
108) echo "Validation: Kernel key limits exceeded" ;;
109) echo "Proxmox: No available container ID after max attempts" ;;
110) echo "Proxmox: Failed to apply default.vars" ;;
111) echo "Proxmox: App defaults file not available" ;;
112) echo "Proxmox: Invalid install menu option" ;;
113) echo "LXC: Under-provisioned — user aborted update" ;;
114) echo "LXC: Storage too low — user aborted update" ;;
115) echo "Download: install.func download failed or incomplete" ;;
116) echo "Proxmox: Default bridge vmbr0 not found" ;;
117) echo "LXC: Container did not reach running state" ;;
118) echo "LXC: No IP assigned to container after timeout" ;;
119) echo "Proxmox: No valid storage for rootdir content" ;;
120) echo "Proxmox: No valid storage for vztmpl content" ;;
121) echo "LXC: Container network not ready (no IP after retries)" ;;
122) echo "LXC: No internet connectivity — user declined to continue" ;;
123) echo "LXC: Local IP detection failed" ;;
# --- BSD sysexits.h (64-78) ---
64) echo "Usage error (wrong arguments)" ;;
65) echo "Data format error (bad input data)" ;;
@@ -302,18 +276,8 @@ explain_exit_code() {
223) echo "Proxmox: Template not available after download" ;;
224) echo "Proxmox: PBS storage is for backups only" ;;
225) echo "Proxmox: No template available for OS/Version" ;;
226) echo "Proxmox: VM disk import or post-creation setup failed" ;;
231) echo "Proxmox: LXC stack upgrade failed" ;;
# --- Tools & Addon Scripts (232-238) ---
232) echo "Tools: Wrong execution environment (run on PVE host, not inside LXC)" ;;
233) echo "Tools: Application not installed (update prerequisite missing)" ;;
234) echo "Tools: No LXC containers found or available" ;;
235) echo "Tools: Backup or restore operation failed" ;;
236) echo "Tools: Required hardware not detected" ;;
237) echo "Tools: Dependency package installation failed" ;;
238) echo "Tools: OS or distribution not supported for this addon" ;;
# --- Node.js / npm / pnpm / yarn (239-249) ---
239) echo "npm/Node.js: Unexpected runtime error or dependency failure" ;;
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
@@ -323,13 +287,6 @@ explain_exit_code() {
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
# --- Application Install/Update Errors (250-254) ---
250) echo "App: Download failed or version not determined" ;;
251) echo "App: File extraction failed (corrupt or incomplete archive)" ;;
252) echo "App: Required file or resource not found" ;;
253) echo "App: Data migration required — update aborted" ;;
254) echo "App: User declined prompt or input timed out" ;;
# --- DPKG ---
255) echo "DPKG: Fatal internal error" ;;
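# A minimal sketch of how explain_exit_code is consumed (hypothetical caller;
# the 101 mapping is taken from the table above):
rc=101
echo "Exit ${rc}: $(explain_exit_code "$rc")"
# -> Exit 101: APT: Configuration error (bad sources.list, malformed config)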
@@ -346,20 +303,18 @@ explain_exit_code() {
# - Handles backslashes, quotes, newlines, tabs, and carriage returns
# ------------------------------------------------------------------------------
json_escape() {
# Escape a string for safe JSON embedding using awk (handles any input size).
# Pipeline: strip ANSI → remove control chars → escape \ " TAB → join lines with \n
printf '%s' "$1" \
| sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' \
| tr -d '\000-\010\013\014\016-\037\177\r' \
| awk '
BEGIN { ORS = "" }
{
gsub(/\\/, "\\\\") # backslash → \\
gsub(/"/, "\\\"") # double quote → \"
gsub(/\t/, "\\t") # tab → \t
if (NR > 1) printf "\\n"
printf "%s", $0
}'
local s="$1"
# Strip ANSI escape sequences (color codes etc.)
s=$(printf '%s' "$s" | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g')
s=${s//\\/\\\\}
s=${s//\"/\\\"}
s=${s//$'\n'/\\n}
s=${s//$'\r'/}
s=${s//$'\t'/\\t}
# Remove any remaining control characters (0x00-0x1F except those already handled)
# Also remove DEL (0x7F) and invalid high bytes that break JSON parsers
s=$(printf '%s' "$s" | tr -d '\000-\010\013\014\016-\037\177')
printf '%s' "$s"
}
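# Intended behavior, illustrated (hypothetical input):
json_escape $'say "hi"\nback\\slash'
# -> say \"hi\"\nback\\slash   (safe to embed inside a JSON string value)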
# ------------------------------------------------------------------------------
@@ -395,11 +350,6 @@ get_error_text() {
logfile="$BUILD_LOG"
fi
# Try SILENT_LOGFILE as last resort (captures $STD command output)
if [[ -z "$logfile" || ! -s "$logfile" ]] && [[ -n "${SILENT_LOGFILE:-}" && -s "${SILENT_LOGFILE}" ]]; then
logfile="$SILENT_LOGFILE"
fi
if [[ -n "$logfile" && -s "$logfile" ]]; then
tail -n 20 "$logfile" 2>/dev/null | sed 's/\r$//' | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g'
fi
@@ -445,13 +395,6 @@ get_full_log() {
fi
fi
# Fall back to SILENT_LOGFILE (captures $STD command output)
if [[ -z "$logfile" || ! -s "$logfile" ]]; then
if [[ -n "${SILENT_LOGFILE:-}" && -s "${SILENT_LOGFILE}" ]]; then
logfile="$SILENT_LOGFILE"
fi
fi
if [[ -n "$logfile" && -s "$logfile" ]]; then
# Strip ANSI codes, carriage returns, and anonymize IP addresses (GDPR)
sed 's/\r$//' "$logfile" 2>/dev/null |
@@ -689,23 +632,18 @@ EOF
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] Sending to: $TELEMETRY_URL" >&2
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] Payload: $JSON_PAYLOAD" >&2
# Send initial "installing" record with retry.
# This record MUST exist for all subsequent updates to succeed.
local http_code="" attempt
for attempt in 1 2 3; do
if [[ "${DEV_MODE:-}" == "true" ]]; then
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" -o /dev/stderr 2>&1) || http_code="000"
echo "[DEBUG] post_to_api attempt $attempt HTTP=$http_code" >&2
else
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
fi
[[ "$http_code" =~ ^2[0-9]{2}$ ]] && break
[[ "$attempt" -lt 3 ]] && sleep 1
done
# Fire-and-forget: never block, never fail
local http_code
if [[ "${DEV_MODE:-}" == "true" ]]; then
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" -o /dev/stderr 2>&1) || true
echo "[DEBUG] HTTP response code: $http_code" >&2
else
curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" &>/dev/null || true
fi
POST_TO_API_DONE=true
}
@@ -796,15 +734,10 @@ post_to_api_vm() {
EOF
)
# Send initial "installing" record with retry (must succeed for updates to work)
local http_code="" attempt
for attempt in 1 2 3; do
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
[[ "$http_code" =~ ^2[0-9]{2}$ ]] && break
[[ "$attempt" -lt 3 ]] && sleep 1
done
# Fire-and-forget: never block, never fail
curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" &>/dev/null || true
POST_TO_API_DONE=true
}
@@ -900,7 +833,7 @@ post_update_to_api() {
esac
# For failed/unknown status, resolve exit code and error description
local short_error="" medium_error=""
local short_error=""
if [[ "$pb_status" == "failed" ]] || [[ "$pb_status" == "unknown" ]]; then
if [[ "$raw_exit_code" =~ ^[0-9]+$ ]]; then
exit_code="$raw_exit_code"
@@ -920,18 +853,6 @@ post_update_to_api() {
short_error=$(json_escape "$(explain_exit_code "$exit_code")")
error_category=$(categorize_error "$exit_code")
[[ -z "$error" ]] && error="Unknown error"
# Build medium error for attempt 2: explanation + last 100 log lines (≤16KB)
# This is the critical middle ground between full 120KB log and generic-only description
local medium_log=""
medium_log=$(get_full_log 16384) || true # 16KB max
if [[ -z "$medium_log" ]]; then
medium_log=$(get_error_text) || true
fi
local medium_full
medium_full=$(build_error_string "$exit_code" "$medium_log")
medium_error=$(json_escape "$medium_full")
[[ -z "$medium_error" ]] && medium_error="$short_error"
fi
# Calculate duration if timer was started
@@ -948,11 +869,6 @@ post_update_to_api() {
local http_code=""
# Strip 'G' suffix from disk size (VMs set DISK_SIZE=32G)
local DISK_SIZE_API="${DISK_SIZE:-0}"
DISK_SIZE_API="${DISK_SIZE_API%G}"
[[ ! "$DISK_SIZE_API" =~ ^[0-9]+$ ]] && DISK_SIZE_API=0
# ── Attempt 1: Full payload with complete error text (includes full log) ──
local JSON_PAYLOAD
JSON_PAYLOAD=$(
@@ -964,7 +880,7 @@ post_update_to_api() {
"nsapp": "${NSAPP:-unknown}",
"status": "${pb_status}",
"ct_type": ${CT_TYPE:-1},
"disk_size": ${DISK_SIZE_API},
"disk_size": ${DISK_SIZE:-0},
"core_count": ${CORE_COUNT:-0},
"ram_size": ${RAM_SIZE:-0},
"os_type": "${var_os:-}",
@@ -995,7 +911,7 @@ EOF
return 0
fi
# ── Attempt 2: Medium error text (truncated log ≤16KB instead of full 120KB) ──
# ── Attempt 2: Short error text (no full log) ──
sleep 1
local RETRY_PAYLOAD
RETRY_PAYLOAD=$(
@@ -1007,7 +923,7 @@ EOF
"nsapp": "${NSAPP:-unknown}",
"status": "${pb_status}",
"ct_type": ${CT_TYPE:-1},
"disk_size": ${DISK_SIZE_API},
"disk_size": ${DISK_SIZE:-0},
"core_count": ${CORE_COUNT:-0},
"ram_size": ${RAM_SIZE:-0},
"os_type": "${var_os:-}",
@@ -1015,7 +931,7 @@ EOF
"pve_version": "${pve_version}",
"method": "${METHOD:-default}",
"exit_code": ${exit_code},
"error": "${medium_error}",
"error": "${short_error}",
"error_category": "${error_category}",
"install_duration": ${duration},
"cpu_vendor": "${cpu_vendor}",
@@ -1038,7 +954,7 @@ EOF
return 0
fi
# ── Attempt 3: Minimal payload with medium error (bare minimum to set status) ──
# ── Attempt 3: Minimal payload (bare minimum to set status) ──
sleep 2
local MINIMAL_PAYLOAD
MINIMAL_PAYLOAD=$(
@@ -1050,7 +966,7 @@ EOF
"nsapp": "${NSAPP:-unknown}",
"status": "${pb_status}",
"exit_code": ${exit_code},
"error": "${medium_error}",
"error": "${short_error}",
"error_category": "${error_category}",
"install_duration": ${duration}
}

View File

@@ -100,624 +100,58 @@ fi
# ==============================================================================
# SECTION 2: PRE-FLIGHT CHECKS & SYSTEM VALIDATION
# ==============================================================================
#
# Runs comprehensive system checks BEFORE container creation to catch common
# issues early. This prevents users from going through the entire configuration
# menu only to have creation fail due to a system-level problem.
#
# Checks performed (via run_preflight):
# - Kernel: keyring limits (maxkeys/maxbytes for UID 100000)
# - Storage: rootdir support, vztmpl support, available space
# - Network: bridge availability, DNS resolution
# - Repos: enterprise repo subscription validation
# - Cluster: quorum status (if clustered)
# - Proxmox: LXC stack health, container ID availability
# - Template: download server reachability
#
# Design:
# - All checks run and results are collected (no exit on first failure)
# - Clear, actionable error messages with suggested fixes
# - Reports "aborted" status to telemetry (not "failed")
# - Uses existing exit codes for consistency with error_handler/api.func
#
# ==============================================================================
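# In flow terms, the section below reduces to one entry point (a sketch;
# run_preflight and the preflight_* helpers are defined later in this section):
#
#   run_preflight   # runs every preflight_* check, collecting results instead
#                   # of exiting on the first failure; on any failure it prints
#                   # a summary, reports "aborted" to telemetry, and exits with
#                   # the first failure's code; on success it prints the counts,
#                   # pauses 2s, and returns to install_script()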
# --- Preflight tracking globals ---
PREFLIGHT_PASSED=0
PREFLIGHT_FAILED=0
PREFLIGHT_WARNINGS=0
PREFLIGHT_FAILURES=()
PREFLIGHT_EXIT_CODE=0
# ------------------------------------------------------------------------------
# preflight_pass() / preflight_fail() / preflight_warn()
#
# - Track individual check results
# - preflight_fail stores message + exit_code for summary
# ------------------------------------------------------------------------------
preflight_pass() {
local msg="$1"
((PREFLIGHT_PASSED++)) || true
echo -e " ${CM} ${GN}${msg}${CL}"
}
preflight_fail() {
local msg="$1"
local exit_code="${2:-1}"
((PREFLIGHT_FAILED++)) || true
PREFLIGHT_FAILURES+=("${exit_code}|${msg}")
[[ "$PREFLIGHT_EXIT_CODE" -eq 0 ]] && PREFLIGHT_EXIT_CODE="$exit_code"
echo -e " ${CROSS} ${RD}${msg}${CL}"
}
preflight_warn() {
local msg="$1"
((PREFLIGHT_WARNINGS++)) || true
echo -e " ${INFO} ${YW}${msg}${CL}"
}
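# How the three trackers compose (a sketch using the globals above):
preflight_pass "Storage OK"
preflight_fail "No bridge found" 116
preflight_warn "Enterprise repo enabled"
# -> PREFLIGHT_PASSED=1, PREFLIGHT_FAILED=1, PREFLIGHT_WARNINGS=1,
#    PREFLIGHT_EXIT_CODE=116, PREFLIGHT_FAILURES=("116|No bridge found")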
# ------------------------------------------------------------------------------
# preflight_maxkeys()
# maxkeys_check()
#
# - Reads kernel keyring limits (maxkeys, maxbytes)
# - Checks current usage for LXC user (UID 100000)
# - Warns if usage is close to limits and suggests sysctl tuning
# - https://cleveruptime.com/docs/files/proc-key-users
# - https://docs.kernel.org/security/keys/core.html
# - Exits if thresholds are exceeded
# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
# ------------------------------------------------------------------------------
preflight_maxkeys() {
local per_user_maxkeys per_user_maxbytes
maxkeys_check() {
# Read kernel parameters
per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
# Exit if kernel parameters are unavailable
if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
preflight_fail "Unable to read kernel key parameters" 107
echo -e " ${TAB}${INFO} Ensure proper permissions to /proc/sys/kernel/keys/"
return 0
msg_error "Unable to read kernel key parameters. Ensure proper permissions."
exit 1
fi
local used_lxc_keys used_lxc_bytes
# Fetch key usage for user ID 100000 (typical for containers)
used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
local threshold_keys=$((per_user_maxkeys - 100))
local threshold_bytes=$((per_user_maxbytes - 1000))
local new_limit_keys=$((per_user_maxkeys * 2))
local new_limit_bytes=$((per_user_maxbytes * 2))
# Calculate thresholds and suggested new limits
threshold_keys=$((per_user_maxkeys - 100))
threshold_bytes=$((per_user_maxbytes - 1000))
new_limit_keys=$((per_user_maxkeys * 2))
new_limit_bytes=$((per_user_maxbytes * 2))
local failure=0
# Check if key or byte usage is near limits
failure=0
if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
msg_warn "Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys})"
echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
failure=1
fi
if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
msg_warn "Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes})"
echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
failure=1
fi
# Provide next steps if issues are detected
if [[ "$failure" -eq 1 ]]; then
preflight_fail "Kernel key limits near threshold (keys: ${used_lxc_keys}/${per_user_maxkeys}, bytes: ${used_lxc_bytes}/${per_user_maxbytes})" 108
echo -e " ${TAB}${INFO} Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} and ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL}"
echo -e " ${TAB}${INFO} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}, then run: ${GN}sysctl --system${CL}"
return 0
msg_error "Kernel key limits exceeded - see suggestions above"
exit 1
fi
preflight_pass "Kernel key limits OK (keys: ${used_lxc_keys}/${per_user_maxkeys})"
return 0
}
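# The kernel interfaces read by this check, in isolation (paths as used above):
cat /proc/sys/kernel/keys/maxkeys            # per-user limit on number of keys
cat /proc/sys/kernel/keys/maxbytes           # per-user limit on key payload bytes
awk '/100000:/ {print $2}' /proc/key-users   # keys currently held by UID 100000 (container root)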
# ------------------------------------------------------------------------------
# preflight_storage_rootdir()
#
# - Verifies at least one storage supports 'rootdir' content type
# - Without this, no LXC container can be created
# ------------------------------------------------------------------------------
preflight_storage_rootdir() {
local count
count=$(pvesm status -content rootdir 2>/dev/null | awk 'NR>1 {count++} END {print count+0}')
if [[ "$count" -eq 0 ]]; then
preflight_fail "No storage with 'rootdir' support found" 119
echo -e " ${TAB}${INFO} Enable 'rootdir' content on a storage in Datacenter → Storage"
return 0
fi
preflight_pass "Storage with 'rootdir' support available (${count} storage(s))"
return 0
}
# ------------------------------------------------------------------------------
# preflight_storage_vztmpl()
#
# - Verifies at least one storage supports 'vztmpl' content type
# - Required for downloading and storing OS templates
# ------------------------------------------------------------------------------
preflight_storage_vztmpl() {
local count
count=$(pvesm status -content vztmpl 2>/dev/null | awk 'NR>1 {count++} END {print count+0}')
if [[ "$count" -eq 0 ]]; then
preflight_fail "No storage with 'vztmpl' support found" 120
echo -e " ${TAB}${INFO} Enable 'vztmpl' content on a storage in Datacenter → Storage"
return 0
fi
preflight_pass "Storage with 'vztmpl' support available (${count} storage(s))"
return 0
}
# ------------------------------------------------------------------------------
# preflight_storage_space()
#
# - Checks if any rootdir-capable storage has enough free space
# - Uses the app-declared var_disk as minimum requirement
# ------------------------------------------------------------------------------
preflight_storage_space() {
local required_gb="${var_disk:-4}"
local required_kb=$((required_gb * 1024 * 1024))
local has_enough=0
local best_storage=""
local best_free=0
while read -r storage_name _ _ _ _ free_kb _; do
[[ -z "$storage_name" || -z "$free_kb" ]] && continue
[[ "$free_kb" == "0" ]] && continue
if [[ "$free_kb" -ge "$required_kb" ]]; then
has_enough=1
if [[ "$free_kb" -gt "$best_free" ]]; then
best_free="$free_kb"
best_storage="$storage_name"
fi
fi
done < <(pvesm status -content rootdir 2>/dev/null | awk 'NR>1')
if [[ "$has_enough" -eq 0 ]]; then
preflight_fail "No storage has enough space (need ${required_gb}GB for ${APP})" 214
echo -e " ${TAB}${INFO} Free up disk space or add a new storage with sufficient capacity"
return 0
fi
local best_free_fmt
best_free_fmt=$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$best_free" 2>/dev/null || echo "${best_free}KB")
preflight_pass "Sufficient storage space (${best_storage}: ${best_free_fmt} free, need ${required_gb}GB)"
return 0
}
# ------------------------------------------------------------------------------
# preflight_network_bridge()
#
# - Checks if at least one network bridge exists (vmbr*)
# - Verifies vmbr0 specifically (default bridge used by most scripts)
# ------------------------------------------------------------------------------
preflight_network_bridge() {
local bridges
bridges=$(ip -o link show type bridge 2>/dev/null | grep -oE 'vmbr[0-9]+' | sort -u)
if [[ -z "$bridges" ]]; then
preflight_fail "No network bridge (vmbr*) found" 116
echo -e " ${TAB}${INFO} Create a bridge in Network → Create → Linux Bridge"
return 0
fi
if echo "$bridges" | grep -qx "vmbr0"; then
preflight_pass "Default network bridge vmbr0 available"
else
local first_bridge
first_bridge=$(echo "$bridges" | head -1)
preflight_warn "Default bridge vmbr0 not found, but ${first_bridge} is available"
echo -e " ${TAB}${INFO} Scripts default to vmbr0 — use Advanced Settings to select ${first_bridge}"
fi
return 0
}
# ------------------------------------------------------------------------------
# preflight_dns_resolution()
#
# - Tests if DNS resolution works (required for template downloads)
# - Tries multiple hosts to avoid false positives
# ------------------------------------------------------------------------------
preflight_dns_resolution() {
local test_hosts=("download.proxmox.com" "raw.githubusercontent.com" "community-scripts.org")
local resolved=0
for host in "${test_hosts[@]}"; do
if getent hosts "$host" &>/dev/null; then
resolved=1
break
fi
done
if [[ "$resolved" -eq 0 ]]; then
for host in "${test_hosts[@]}"; do
if command -v nslookup &>/dev/null && nslookup "$host" &>/dev/null; then
resolved=1
break
fi
done
fi
if [[ "$resolved" -eq 0 ]]; then
preflight_fail "DNS resolution failed — cannot reach template servers" 222
echo -e " ${TAB}${INFO} Check /etc/resolv.conf and network connectivity"
return 0
fi
preflight_pass "DNS resolution working"
return 0
}
# ------------------------------------------------------------------------------
# preflight_repo_access()
#
# - Checks if Proxmox enterprise repos are enabled without a valid subscription
# - Scans /etc/apt/sources.list.d/ for enterprise.proxmox.com entries
# - Tests HTTP access to detect 401 Unauthorized
# - Warning only (not a blocker — packages come from no-subscription repo)
# ------------------------------------------------------------------------------
preflight_repo_access() {
local enterprise_files
enterprise_files=$(grep -rlE '^\s*deb\s+https://enterprise\.proxmox\.com' /etc/apt/sources.list.d/ 2>/dev/null || true)
if [[ -z "$enterprise_files" ]]; then
preflight_pass "No enterprise repositories enabled"
return 0
fi
# Enterprise repo found — test if subscription is valid
local http_code
http_code=$(curl -sS -o /dev/null -w "%{http_code}" -m 5 "https://enterprise.proxmox.com/debian/pve/dists/" 2>/dev/null) || http_code="000"
if [[ "$http_code" == "401" || "$http_code" == "403" ]]; then
preflight_warn "Enterprise repo enabled without valid subscription (HTTP ${http_code})"
echo -e " ${TAB}${INFO} apt-get update will show '401 Unauthorized' errors"
echo -e " ${TAB}${INFO} Disable in ${GN}/etc/apt/sources.list.d/${CL} or add a subscription key"
return 0
fi
if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
preflight_pass "Enterprise repository accessible (subscription valid)"
else
preflight_warn "Enterprise repo check inconclusive (HTTP ${http_code})"
fi
return 0
}
# ------------------------------------------------------------------------------
# preflight_cluster_quorum()
#
# - Checks cluster quorum status (only if node is part of a cluster)
# - Skipped on standalone nodes
# ------------------------------------------------------------------------------
preflight_cluster_quorum() {
if [[ ! -f /etc/pve/corosync.conf ]]; then
preflight_pass "Standalone node (no cluster quorum needed)"
return 0
fi
if pvecm status 2>/dev/null | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
preflight_pass "Cluster is quorate"
return 0
fi
preflight_fail "Cluster is not quorate — container operations will fail" 210
echo -e " ${TAB}${INFO} Ensure all cluster nodes are running, or configure a QDevice"
return 0
}
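# The awk test above keys off a single line of pvecm status output:
pvecm status | awk -F':' '/^Quorate/ {print $2}'   # e.g. "          Yes" on a healthy cluster
# awk exits 0 when the value matches /Yes/, which selects the pass branch.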
# ------------------------------------------------------------------------------
# preflight_lxc_stack()
#
# - Validates pve-container and lxc-pve packages are installed
# - Checks for available updates (informational only)
# ------------------------------------------------------------------------------
preflight_lxc_stack() {
local pve_container_ver lxc_pve_ver
pve_container_ver=$(dpkg-query -W -f='${Version}\n' pve-container 2>/dev/null || echo "")
lxc_pve_ver=$(dpkg-query -W -f='${Version}\n' lxc-pve 2>/dev/null || echo "")
if [[ -z "$pve_container_ver" ]]; then
preflight_fail "Package 'pve-container' is not installed" 231
echo -e " ${TAB}${INFO} Run: apt-get install pve-container"
return 0
fi
if [[ -z "$lxc_pve_ver" ]]; then
preflight_fail "Package 'lxc-pve' is not installed" 231
echo -e " ${TAB}${INFO} Run: apt-get install lxc-pve"
return 0
fi
local pve_container_cand lxc_pve_cand
pve_container_cand=$(apt-cache policy pve-container 2>/dev/null | awk '/Candidate:/ {print $2}') || true
lxc_pve_cand=$(apt-cache policy lxc-pve 2>/dev/null | awk '/Candidate:/ {print $2}') || true
local update_available=0
if [[ -n "$pve_container_cand" && "$pve_container_cand" != "none" ]]; then
if dpkg --compare-versions "$pve_container_cand" gt "$pve_container_ver" 2>/dev/null; then
update_available=1
fi
fi
if [[ -n "$lxc_pve_cand" && "$lxc_pve_cand" != "none" ]]; then
if dpkg --compare-versions "$lxc_pve_cand" gt "$lxc_pve_ver" 2>/dev/null; then
update_available=1
fi
fi
if [[ "$update_available" -eq 1 ]]; then
preflight_warn "LXC stack update available (current: pve-container=${pve_container_ver}, lxc-pve=${lxc_pve_ver})"
echo -e " ${TAB}${INFO} An upgrade will be offered during container creation if needed"
else
preflight_pass "LXC stack is up to date (pve-container=${pve_container_ver})"
fi
return 0
}
# ------------------------------------------------------------------------------
# preflight_container_id()
#
# - Verifies that container IDs can be allocated
# - Uses pvesh /cluster/nextid (cluster-aware)
# ------------------------------------------------------------------------------
preflight_container_id() {
local nextid
nextid=$(pvesh get /cluster/nextid 2>/dev/null) || true
if [[ -z "$nextid" || ! "$nextid" =~ ^[0-9]+$ ]]; then
preflight_fail "Cannot allocate container ID (pvesh /cluster/nextid failed)" 109
echo -e " ${TAB}${INFO} Check Proxmox cluster health and datacenter.cfg ID ranges"
return 0
fi
preflight_pass "Container IDs available (next: ${nextid})"
return 0
}
# ------------------------------------------------------------------------------
# preflight_template_connectivity()
#
# - Tests connectivity to the Proxmox template download server
# - Warns but does not fail (local templates may be available)
# ------------------------------------------------------------------------------
preflight_template_connectivity() {
local http_code
http_code=$(curl -sS -o /dev/null -w "%{http_code}" -m 5 "http://download.proxmox.com/images/system/" 2>/dev/null) || http_code="000"
if [[ "$http_code" =~ ^2[0-9]{2}$ || "$http_code" =~ ^3[0-9]{2}$ ]]; then
preflight_pass "Template server reachable (download.proxmox.com)"
return 0
fi
local local_count=0
while read -r storage_name _; do
[[ -z "$storage_name" ]] && continue
local count
count=$(pveam list "$storage_name" 2>/dev/null | awk 'NR>1' | wc -l)
local_count=$((local_count + count))
done < <(pvesm status -content vztmpl 2>/dev/null | awk 'NR>1 {print $1}')
if [[ "$local_count" -gt 0 ]]; then
preflight_warn "Template server unreachable, but ${local_count} local template(s) available"
return 0
fi
preflight_fail "Template server unreachable and no local templates available" 222
echo -e " ${TAB}${INFO} Check internet connectivity or manually upload templates"
return 0
}
# ------------------------------------------------------------------------------
# preflight_template_available()
#
# - Validates that a template exists for the configured var_os/var_version
# - Checks both local templates and the online pveam catalog
# - Fails if no matching template can be found anywhere
# ------------------------------------------------------------------------------
preflight_template_available() {
local os="${var_os:-}"
local version="${var_version:-}"
# Skip if os/version not set (e.g. Alpine scripts set them differently)
if [[ -z "$os" || -z "$version" ]]; then
preflight_pass "Template check skipped (OS/version not configured yet)"
return 0
fi
local search_pattern="${os}-${version}"
# Check local templates first
local local_match=0
while read -r storage_name _; do
[[ -z "$storage_name" ]] && continue
if pveam list "$storage_name" 2>/dev/null | awk '{print $1}' | grep -qE "^${storage_name}:vztmpl/${search_pattern}"; then
local_match=1
break
fi
done < <(pvesm status -content vztmpl 2>/dev/null | awk 'NR>1 {print $1}')
if [[ "$local_match" -eq 1 ]]; then
preflight_pass "Template available locally for ${os} ${version}"
return 0
fi
# Check online catalog
local online_match=0
if pveam available -section system 2>/dev/null | awk '{print $2}' | grep -qE "^${search_pattern}[.-]"; then
online_match=1
fi
if [[ "$online_match" -eq 1 ]]; then
preflight_pass "Template available online for ${os} ${version}"
return 0
fi
# Gather available versions for the hint
local available_versions
available_versions=$(
pveam available -section system 2>/dev/null |
awk '{print $2}' |
grep -oE "^${os}-[0-9]+(\.[0-9]+)?" |
sed "s/^${os}-//" |
sort -uV 2>/dev/null | tr '\n' ', ' | sed 's/,$//' | sed 's/,/, /g'
)
preflight_fail "No template found for ${os} ${version}" 225
if [[ -n "$available_versions" ]]; then
echo -e " ${TAB}${INFO} Available ${os} versions: ${GN}${available_versions}${CL}"
fi
echo -e " ${TAB}${INFO} Check var_version in your CT script or use an available version"
return 0
}
# ------------------------------------------------------------------------------
# preflight_package_repos()
#
# - Tests connectivity to OS package repositories from the host
# - Selects repos based on var_os (debian, ubuntu, alpine)
# - Uses HTTP HEAD requests to verify the repo index is reachable
# - Warning only (user may use local mirrors, apt-cacher-ng, etc.)
# ------------------------------------------------------------------------------
preflight_package_repos() {
local os="${var_os:-debian}"
local version="${var_version:-}"
local -a repo_urls=()
local repo_label=""
case "$os" in
debian)
repo_label="Debian"
repo_urls=(
"http://deb.debian.org/debian/dists/${version:-bookworm}/Release"
"http://security.debian.org/debian-security/dists/${version:-bookworm}-security/Release"
)
;;
ubuntu)
repo_label="Ubuntu"
repo_urls=(
"http://archive.ubuntu.com/ubuntu/dists/${version:-jammy}/Release"
"http://security.ubuntu.com/ubuntu/dists/${version:-jammy}-security/Release"
)
;;
alpine)
repo_label="Alpine"
# Alpine versions use x.y format (e.g. 3.20)
local alpine_branch="v${version:-3.20}"
repo_urls=(
"https://dl-cdn.alpinelinux.org/alpine/${alpine_branch}/main/x86_64/APKINDEX.tar.gz"
)
;;
*)
preflight_pass "Package repo check skipped (OS: ${os})"
return 0
;;
esac
local all_ok=true
local failed_urls=()
for url in "${repo_urls[@]}"; do
local http_code
http_code=$(curl -sS -o /dev/null -w "%{http_code}" -m 8 --head "$url" 2>/dev/null) || http_code="000"
# Accept 200, 3xx redirects, and 405 (HEAD not allowed but server is up)
if [[ ! "$http_code" =~ ^(2[0-9]{2}|3[0-9]{2}|405)$ ]]; then
all_ok=false
# Extract hostname for readable output
local host
host=$(echo "$url" | sed -E 's|https?://([^/]+).*|\1|')
failed_urls+=("${host} (HTTP ${http_code})")
fi
done
if [[ "$all_ok" == true ]]; then
preflight_pass "${repo_label} package repositories reachable"
else
local fail_summary
fail_summary=$(printf '%s, ' "${failed_urls[@]}")
fail_summary="${fail_summary%, }"
preflight_warn "${repo_label} package repository not fully reachable: ${fail_summary}"
echo -e " ${TAB}${INFO} Container may fail during ${GN}apt-get update${CL} / ${GN}apk update${CL}"
echo -e " ${TAB}${INFO} Check firewall rules, proxy settings, or DNS from this host"
fi
return 0
}
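# One probe from the loop above, expanded (Debian bookworm URL as in the case arm):
http_code=$(curl -sS -o /dev/null -w "%{http_code}" -m 8 --head \
  "http://deb.debian.org/debian/dists/bookworm/Release")
echo "$http_code"   # 200/3xx -> reachable; 405 -> HEAD rejected but server up; else flagged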
# ------------------------------------------------------------------------------
# run_preflight()
#
# - Executes all preflight checks and collects results
# - Displays a summary with pass/fail/warn counts
# - On failure: reports to telemetry with "aborted" status and exits cleanly
# - On success: brief pause (2s) then returns (caller shows next screen)
# - Called from install_script() after header_info()
# ------------------------------------------------------------------------------
run_preflight() {
# Reset counters
PREFLIGHT_PASSED=0
PREFLIGHT_FAILED=0
PREFLIGHT_WARNINGS=0
PREFLIGHT_FAILURES=()
PREFLIGHT_EXIT_CODE=0
echo -e "${INFO}${BOLD}${DGN} Running pre-flight checks...${CL}"
echo ""
# --- Kernel checks ---
preflight_maxkeys
# --- Storage checks ---
preflight_storage_rootdir
preflight_storage_vztmpl
preflight_storage_space
# --- Network checks ---
preflight_network_bridge
preflight_dns_resolution
# --- Repository checks ---
preflight_repo_access
# --- Proxmox/Cluster checks ---
preflight_cluster_quorum
preflight_lxc_stack
preflight_container_id
# --- Template availability ---
preflight_template_connectivity
preflight_template_available
# --- Package repository checks ---
preflight_package_repos
echo ""
# --- Summary ---
if [[ "$PREFLIGHT_FAILED" -gt 0 ]]; then
echo -e "${CROSS}${BOLD}${RD} Pre-flight failed: ${PREFLIGHT_FAILED} error(s), ${PREFLIGHT_WARNINGS} warning(s), ${PREFLIGHT_PASSED} passed${CL}"
echo ""
echo -e "${INFO}${BOLD}${DGN} Failure details:${CL}"
for failure in "${PREFLIGHT_FAILURES[@]}"; do
local code="${failure%%|*}"
local msg="${failure#*|}"
echo -e " ${CROSS} [Exit ${code}] ${msg}"
done
echo ""
echo -e "${INFO} Please resolve the above issues before creating a container."
echo -e "${INFO} Documentation: ${BL}https://community-scripts.github.io/ProxmoxVE/${CL}"
# Report to telemetry (if consent was given)
post_preflight_to_api
exit "$PREFLIGHT_EXIT_CODE"
fi
# Success — brief pause so user can see results, then clear for next screen
if [[ "$PREFLIGHT_WARNINGS" -gt 0 ]]; then
echo -e "${CM}${BOLD}${GN} Pre-flight passed with ${PREFLIGHT_WARNINGS} warning(s) (${PREFLIGHT_PASSED} checks passed)${CL}"
else
echo -e "${CM}${BOLD}${GN} All pre-flight checks passed (${PREFLIGHT_PASSED}/${PREFLIGHT_PASSED})${CL}"
fi
sleep 2
# Silent success - only show errors if they exist
}
# ==============================================================================
@@ -921,7 +355,7 @@ get_valid_container_id() {
attempts=$((attempts + 1))
if [[ $attempts -ge $max_attempts ]]; then
msg_error "Could not find available container ID after $max_attempts attempts"
exit 109
exit 1
fi
done
@@ -2417,7 +1851,7 @@ advanced_settings() {
# ═══════════════════════════════════════════════════════════════════════════
# STEP 2: Root Password
# ═══════════════════════════════════════════════════════════════════════════
2)
if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "ROOT PASSWORD" \
@@ -2601,7 +2035,7 @@ advanced_settings() {
else
whiptail --msgbox "Default bridge 'vmbr0' not found!\n\nPlease configure a network bridge in Proxmox first." 10 58
msg_error "Default bridge 'vmbr0' not found"
exit 116
exit 1
fi
else
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
@@ -3487,7 +2921,7 @@ echo_default() {
# install_script()
#
# - Main entrypoint for installation mode
# - Runs safety checks (pve_check, root_check, diagnostics_check, run_preflight)
# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
# - Applies chosen settings and triggers container build
# ------------------------------------------------------------------------------
@@ -3497,6 +2931,7 @@ install_script() {
root_check
arch_check
ssh_check
maxkeys_check
diagnostics_check
if systemctl is-active -q ping-instances.service; then
@@ -3516,9 +2951,8 @@ install_script() {
fi
[[ "${timezone:-}" == Etc/* ]] && timezone="host" # pct doesn't accept Etc/* zones
# Show APP Header + run preflight checks
# Show APP Header
header_info
run_preflight
# --- Support CLI argument as direct preset (default, advanced, …) ---
CHOICE="${mode:-${1:-}}"
@@ -3589,7 +3023,7 @@ install_script() {
3 | mydefaults | MYDEFAULTS | userdefaults | USERDEFAULTS)
default_var_settings || {
msg_error "Failed to apply default.vars"
exit 110
exit 1
}
defaults_target="/usr/local/community-scripts/default.vars"
break
@@ -3606,7 +3040,7 @@ install_script() {
break
else
msg_error "No App Defaults available for ${APP}"
exit 111
exit 1
fi
;;
"$SETTINGS_OPTION" | settings | SETTINGS)
@@ -3617,7 +3051,7 @@ install_script() {
;;
*)
msg_error "Invalid option: $CHOICE"
exit 112
exit 1
;;
esac
done
@@ -3701,7 +3135,7 @@ check_container_resources() {
read -r prompt </dev/tty
if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
msg_error "Aborted: under-provisioned LXC (${current_cpu} CPU/${current_ram}MB RAM < ${var_cpu} CPU/${var_ram}MB RAM)"
exit 113
exit 1
fi
else
echo -e ""
@@ -3724,7 +3158,7 @@ check_container_storage() {
read -r prompt </dev/tty
if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_error "Aborted: storage too low (${usage}% used)"
exit 114
exit 1
fi
fi
}
@@ -4012,7 +3446,7 @@ start() {
3)
clear
exit_script
exit 0
exit
;;
esac
ensure_profile_loaded
@@ -4122,7 +3556,7 @@ build_container() {
export FUNCTIONS_FILE_PATH="$(curl -fsSL "$_func_url")"
if [[ -z "$FUNCTIONS_FILE_PATH" || ${#FUNCTIONS_FILE_PATH} -lt 100 ]]; then
msg_error "Failed to download install functions from: $_func_url"
exit 115
exit 1
fi
# Core exports for install.func
@@ -4504,7 +3938,7 @@ EOF
local ct_status
ct_status=$(pct status "$CTID" 2>/dev/null || echo "unknown")
msg_error "LXC Container did not reach running state (status: ${ct_status})"
exit 117
exit 1
fi
done
@@ -4533,7 +3967,7 @@ EOF
echo " • Verify static IP configuration (if using static IP)"
echo " • Check Proxmox firewall rules"
echo " • If using Tailscale: Disable MagicDNS temporarily"
exit 118
exit 1
fi
# Verify basic connectivity (ping test)
@@ -4660,8 +4094,15 @@ EOF'
# that sends "configuring" status AFTER the host already reported "failed"
export CONTAINER_INSTALLING=true
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)"
local lxc_exit=$?
# Capture lxc-attach terminal output to host-side log via tee.
# This is the ONLY reliable way to get install output when:
# - install.func fails to load (DNS error) → no container-side logging
# - install script crashes before logging starts
# - $STD/silent() not used for some commands
# PIPESTATUS[0] gets the real exit code from lxc-attach (not from tee).
local _LXC_CAPTURE_LOG="/tmp/.install-capture-${SESSION_ID}.log"
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" 2>&1 | tee "$_LXC_CAPTURE_LOG"
local lxc_exit=${PIPESTATUS[0]}
unset CONTAINER_INSTALLING
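# Illustrative sketch (not from this changeset; run in a plain shell without
# pipefail/errexit): why ${PIPESTATUS[0]} is read instead of $? after a pipe.
false | tee /dev/null
echo "$?"                  # -> 0, the exit code of tee (last pipe element)
false | tee /dev/null
echo "${PIPESTATUS[0]}"    # -> 1, the real exit code of the left-hand command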
@@ -4723,9 +4164,19 @@ EOF'
build_log_copied=true
fi
# Copy and append INSTALL_LOG from container
# Copy and append INSTALL_LOG from container (with timeout to prevent hangs)
local temp_install_log="/tmp/.install-temp-${SESSION_ID}.log"
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_install_log" 2>/dev/null; then
local container_log_ok=false
if timeout 8 pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_install_log" 2>/dev/null; then
# Only use container log if it has meaningful content (>100 bytes)
if [[ -s "$temp_install_log" ]] && [[ $(stat -c%s "$temp_install_log" 2>/dev/null || echo 0) -gt 100 ]]; then
container_log_ok=true
fi
fi
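# Illustrative sketch (CTID and paths hypothetical): the same bounded-copy plus
# minimum-size guard, standalone.
if timeout 8 pct pull 101 /root/app.log /tmp/app.log 2>/dev/null &&
  [[ -s /tmp/app.log && $(stat -c%s /tmp/app.log 2>/dev/null || echo 0) -gt 100 ]]; then
  echo "container log usable"
fi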
# PHASE 2: Use container-side log if available, otherwise use host-captured tee output
local _LXC_CAPTURE_LOG="/tmp/.install-capture-${SESSION_ID}.log"
if [[ "$container_log_ok" == true ]]; then
{
echo "================================================================================"
echo "PHASE 2: APPLICATION INSTALLATION (Container)"
@@ -4733,8 +4184,24 @@ EOF'
cat "$temp_install_log"
echo ""
} >>"$combined_log"
rm -f "$temp_install_log"
install_log_copied=true
elif [[ -s "$_LXC_CAPTURE_LOG" ]]; then
# Fallback: host-captured terminal output from lxc-attach
# This captures everything the user saw, including errors when install.func
# failed to load (DNS issues, etc.) and no container-side logging was set up.
{
echo "================================================================================"
echo "PHASE 2: APPLICATION INSTALLATION (Container - captured from terminal)"
echo "================================================================================"
# Strip ANSI escape codes from terminal capture
sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' "$_LXC_CAPTURE_LOG" | sed 's/\r$//'
echo ""
} >>"$combined_log"
install_log_copied=true
fi
rm -f "$temp_install_log"
if [[ "$install_log_copied" == true ]]; then
# Point INSTALL_LOG to combined log so get_full_log() finds it
INSTALL_LOG="$combined_log"
fi
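# Illustrative sketch (assumes GNU sed, as on a Debian-based PVE host): the same
# two-stage filter applied to a sample colored CRLF line.
printf 'ok \x1b[32mdone\x1b[0m\r\n' \
  | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' | sed 's/\r$//'
# -> "ok done", with escape sequences and the trailing CR removed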
@@ -4920,8 +4387,7 @@ EOF'
echo ""
echo -en "${YW}Select option [1-${max_option}] (default: 1, auto-remove in 60s): ${CL}"
local response=""
if read -t 60 -r response; then
if read -t 60 -r response </dev/tty; then
case "${response:-1}" in
1)
# Remove container
@@ -5008,8 +4474,9 @@ EOF'
# Re-run install script in existing container (don't destroy/recreate)
set +Eeuo pipefail
trap - ERR
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)"
local apt_retry_exit=$?
local _LXC_CAPTURE_LOG="/tmp/.install-capture-${SESSION_ID}.log"
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" 2>&1 | tee "$_LXC_CAPTURE_LOG"
local apt_retry_exit=${PIPESTATUS[0]}
set -Eeuo pipefail
trap 'error_handler' ERR
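# Illustrative sketch (command name hypothetical): the toggle pattern above —
# relax errexit and the ERR trap so a failure can be handled manually, then
# restore both.
set +Eeuo pipefail
trap - ERR
flaky_step; rc=$?          # failure lands in rc instead of aborting the script
set -Eeuo pipefail
trap 'error_handler' ERR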
@@ -5119,6 +4586,9 @@ EOF'
exit $install_exit_code
fi
# Clean up host-side capture log (not needed on success, already in combined_log on failure)
rm -f "/tmp/.install-capture-${SESSION_ID}.log" 2>/dev/null
# Re-enable error handling after successful install or recovery menu completion
set -Eeuo pipefail
trap 'error_handler' ERR
@@ -5534,11 +5004,11 @@ create_lxc_container() {
# Storage capability check
check_storage_support "rootdir" || {
msg_error "No valid storage found for 'rootdir' [Container]"
exit 119
exit 1
}
check_storage_support "vztmpl" || {
msg_error "No valid storage found for 'vztmpl' [Template]"
exit 120
exit 1
}
# Template storage selection
@@ -5816,7 +5286,7 @@ create_lxc_container() {
}
else
msg_custom "🚫" "${YW}" "Installation cancelled"
exit 0
exit 1
fi
else
msg_error "No ${PCT_OSTYPE} templates available"

View File

@@ -276,7 +276,7 @@ shell_check() {
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
echo -e "\nExiting..."
sleep 2
exit 103
exit 1
fi
}
@@ -293,7 +293,7 @@ root_check() {
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit 104
exit 1
fi
}
@@ -314,7 +314,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -325,7 +325,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not yet supported."
msg_error "Supported: Proxmox VE version 9.0 9.1"
exit 105
exit 1
fi
return 0
fi
@@ -333,7 +333,7 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.9 or 9.0 9.1"
exit 105
exit 1
}
# ------------------------------------------------------------------------------
@@ -348,7 +348,7 @@ arch_check() {
msg_error "This script will not work with PiMox (ARM architecture detected)."
msg_warn "Visit https://github.com/asylumexp/Proxmox for ARM64 support."
sleep 2
exit 106
exit 1
fi
}
@@ -932,13 +932,18 @@ is_alpine() {
#
# - Determines if script should run in verbose mode
# - Checks VERBOSE and var_verbose variables
# - Also returns true if not running in TTY (pipe/redirect scenario)
# - Used by msg_info() to decide between spinner and static output
# - Note: Non-TTY (pipe) scenarios are handled separately in msg_info()
# to allow spinner output to pass through pipes (e.g. lxc-attach | tee)
# ------------------------------------------------------------------------------
is_verbose_mode() {
local verbose="${VERBOSE:-${var_verbose:-no}}"
[[ "$verbose" != "no" ]]
local tty_status
if [[ -t 2 ]]; then
tty_status="interactive"
else
tty_status="not-a-tty"
fi
[[ "$verbose" != "no" || ! -t 2 ]]
}
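# Illustrative sketch (not from this changeset): [[ -t 2 ]] is true only while
# stderr is attached to a terminal, which is what the check above relies on.
bash -c '[[ -t 2 ]] && echo interactive || echo not-a-tty'             # interactive
bash -c '[[ -t 2 ]] && echo interactive || echo not-a-tty' 2>&1 | cat  # not-a-tty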
# ------------------------------------------------------------------------------

View File

@@ -94,29 +94,6 @@ if ! declare -f explain_exit_code &>/dev/null; then
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
# --- Script Validation & Setup (103-123) ---
103) echo "Validation: Shell is not Bash" ;;
104) echo "Validation: Not running as root (or invoked via sudo)" ;;
105) echo "Validation: Proxmox VE version not supported" ;;
106) echo "Validation: Architecture not supported (ARM / PiMox)" ;;
107) echo "Validation: Kernel key parameters unreadable" ;;
108) echo "Validation: Kernel key limits exceeded" ;;
109) echo "Proxmox: No available container ID after max attempts" ;;
110) echo "Proxmox: Failed to apply default.vars" ;;
111) echo "Proxmox: App defaults file not available" ;;
112) echo "Proxmox: Invalid install menu option" ;;
113) echo "LXC: Under-provisioned — user aborted update" ;;
114) echo "LXC: Storage too low — user aborted update" ;;
115) echo "Download: install.func download failed or incomplete" ;;
116) echo "Proxmox: Default bridge vmbr0 not found" ;;
117) echo "LXC: Container did not reach running state" ;;
118) echo "LXC: No IP assigned to container after timeout" ;;
119) echo "Proxmox: No valid storage for rootdir content" ;;
120) echo "Proxmox: No valid storage for vztmpl content" ;;
121) echo "LXC: Container network not ready (no IP after retries)" ;;
122) echo "LXC: No internet connectivity — user declined to continue" ;;
123) echo "LXC: Local IP detection failed" ;;
124) echo "Command timed out (timeout command)" ;;
125) echo "Command failed to start (Docker daemon or execution error)" ;;
126) echo "Command invoked cannot execute (permission problem?)" ;;
@@ -178,16 +155,6 @@ if ! declare -f explain_exit_code &>/dev/null; then
224) echo "Proxmox: PBS storage is for backups only" ;;
225) echo "Proxmox: No template available for OS/Version" ;;
231) echo "Proxmox: LXC stack upgrade failed" ;;
# --- Tools & Addon Scripts (232-238) ---
232) echo "Tools: Wrong execution environment (run on PVE host, not inside LXC)" ;;
233) echo "Tools: Application not installed (update prerequisite missing)" ;;
234) echo "Tools: No LXC containers found or available" ;;
235) echo "Tools: Backup or restore operation failed" ;;
236) echo "Tools: Required hardware not detected" ;;
237) echo "Tools: Dependency package installation failed" ;;
238) echo "Tools: OS or distribution not supported for this addon" ;;
239) echo "npm/Node.js: Unexpected runtime error or dependency failure" ;;
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
245) echo "Node.js: Invalid command-line option" ;;
@@ -195,14 +162,6 @@ if ! declare -f explain_exit_code &>/dev/null; then
247) echo "Node.js: Fatal internal error" ;;
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
# --- Application Install/Update Errors (250-254) ---
250) echo "App: Download failed or version not determined" ;;
251) echo "App: File extraction failed (corrupt or incomplete archive)" ;;
252) echo "App: Required file or resource not found" ;;
253) echo "App: Data migration required — update aborted" ;;
254) echo "App: User declined prompt or input timed out" ;;
255) echo "DPKG: Fatal internal error" ;;
*) echo "Unknown error" ;;
esac
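# Illustrative usage sketch for the mapper above:
explain_exit_code 126   # -> "Command invoked cannot execute (permission problem?)"
explain_exit_code 999   # -> "Unknown error" (falls through to the * branch)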
@@ -327,9 +286,7 @@ error_handler() {
echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
fi
# Read user response
local response=""
if read -t 60 -r response; then
if read -t 60 -r response </dev/tty; then
if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then
echo ""
if declare -f msg_info >/dev/null 2>&1; then
@@ -408,29 +365,10 @@ _send_abort_telemetry() {
[[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
[[ -z "${RANDOM_UUID:-}" ]] && return 0
# Collect last 200 log lines for error diagnosis (best-effort)
# Container context has no get_full_log(), so we gather as much as possible
# Collect last 20 log lines for error diagnosis (best-effort)
local error_text=""
local logfile=""
if [[ -n "${INSTALL_LOG:-}" && -s "${INSTALL_LOG}" ]]; then
logfile="${INSTALL_LOG}"
elif [[ -n "${SILENT_LOGFILE:-}" && -s "${SILENT_LOGFILE}" ]]; then
logfile="${SILENT_LOGFILE}"
fi
if [[ -n "$logfile" ]]; then
error_text=$(tail -n 200 "$logfile" 2>/dev/null | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g; s/\\/\\\\/g; s/"/\\"/g; s/\r//g' | tr '\n' '|' | sed 's/|$//' | head -c 16384 | tr -d '\000-\010\013\014\016-\037\177') || true
fi
# Prepend exit code explanation header (like build_error_string does on host)
local explanation=""
if declare -f explain_exit_code &>/dev/null; then
explanation=$(explain_exit_code "$exit_code" 2>/dev/null) || true
fi
if [[ -n "$explanation" && -n "$error_text" ]]; then
error_text="exit_code=${exit_code} | ${explanation}|---|${error_text}"
elif [[ -n "$explanation" && -z "$error_text" ]]; then
error_text="exit_code=${exit_code} | ${explanation}"
error_text=$(tail -n 20 "$INSTALL_LOG" 2>/dev/null | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g; s/\\/\\\\/g; s/"/\\"/g; s/\r//g' | tr '\n' '|' | sed 's/|$//' | tr -d '\000-\010\013\014\016-\037\177') || true
fi
# Calculate duration if start time is available
@@ -439,17 +377,10 @@ _send_abort_telemetry() {
duration=$(($(date +%s) - DIAGNOSTICS_START_TIME))
fi
# Categorize error if function is available (may not be in minimal container context)
local error_category=""
if declare -f categorize_error &>/dev/null; then
error_category=$(categorize_error "$exit_code" 2>/dev/null) || true
fi
# Build JSON payload with error context
local payload
payload="{\"random_id\":\"${RANDOM_UUID}\",\"execution_id\":\"${EXECUTION_ID:-${RANDOM_UUID}}\",\"type\":\"${TELEMETRY_TYPE:-lxc}\",\"nsapp\":\"${NSAPP:-${app:-unknown}}\",\"status\":\"failed\",\"exit_code\":${exit_code}"
[[ -n "$error_text" ]] && payload="${payload},\"error\":\"${error_text}\""
[[ -n "$error_category" ]] && payload="${payload},\"error_category\":\"${error_category}\""
[[ -n "$duration" ]] && payload="${payload},\"duration\":${duration}"
payload="${payload}}"
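# Illustrative sketch (assumes GNU sed; sample text hypothetical): what the
# log-flattening filter above produces — ANSI stripped, quotes escaped,
# newlines joined with '|'.
printf 'step \x1b[31mfailed\x1b[0m\nsaid "no"\n' \
  | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g; s/\\/\\\\/g; s/"/\\"/g; s/\r//g' \
  | tr '\n' '|' | sed 's/|$//'
# -> step failed|said \"no\"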

View File

@@ -126,7 +126,7 @@ setting_up_container() {
if [ "$(hostname -I)" = "" ]; then
echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
echo -e "${NETWORK}Check Network Settings"
exit 121
exit 1
fi
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
systemctl disable -q --now systemd-networkd-wait-online.service
@@ -177,7 +177,7 @@ network_check() {
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
else
echo -e "${NETWORK}Check Network Settings"
exit 122
exit 1
fi
fi
@@ -242,12 +242,12 @@ EOF
local tools_content
tools_content=$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) || {
msg_error "Failed to download tools.func"
exit 115
exit 6
}
source /dev/stdin <<<"$tools_content"
if ! declare -f fetch_and_deploy_gh_release >/dev/null 2>&1; then
msg_error "tools.func loaded but incomplete — missing expected functions"
exit 115
exit 6
fi
}

View File

@@ -934,11 +934,7 @@ upgrade_package() {
# ------------------------------------------------------------------------------
# Repository availability check with caching
# ------------------------------------------------------------------------------
# Note: Must use -gA (global) because tools.func is sourced inside update_os()
# function scope. Plain 'declare -A' would create a local variable that gets
# destroyed when update_os() returns, causing "unbound variable" errors later
# when setup_postgresql/verify_repo_available tries to access the cache key.
declare -gA _REPO_CACHE 2>/dev/null || declare -A _REPO_CACHE 2>/dev/null || true
declare -A _REPO_CACHE 2>/dev/null || true
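# Illustrative sketch (not from this changeset): the scoping difference the
# removed comment described — 'declare -A' inside a function is local,
# 'declare -gA' survives the function return.
fill_local()  { declare -A  CACHE; CACHE[k]=1; }
fill_global() { declare -gA CACHE; CACHE[k]=1; }
fill_local;  echo "${CACHE[k]-unset}"   # -> unset
fill_global; echo "${CACHE[k]-unset}"   # -> 1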
verify_repo_available() {
local repo_url="$1"
@@ -1736,13 +1732,6 @@ setup_deb822_repo() {
rm -f "$tmp_gpg"
return 1
}
else
# Already binary — copy directly
cp -f "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
msg_error "Failed to install GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
fi
rm -f "$tmp_gpg"
chmod 644 "/etc/apt/keyrings/${name}.gpg"
@@ -2206,12 +2195,7 @@ check_for_gh_release() {
local clean_tags=()
for t in "${raw_tags[@]}"; do
# Only strip leading 'v' when followed by a digit (e.g. v1.2.3)
if [[ "$t" =~ ^v[0-9] ]]; then
clean_tags+=("${t:1}")
else
clean_tags+=("$t")
fi
clean_tags+=("${t#v}")
done
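# Illustrative sketch (not from this changeset): what the dropped digit guard
# changes for a tag that starts with 'v' but is not a version.
t="version/2.0"
echo "${t#v}"                                        # -> ersion/2.0
[[ "$t" =~ ^v[0-9] ]] && echo "${t:1}" || echo "$t"  # -> version/2.0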
local latest_raw="${raw_tags[0]}"
@@ -2324,12 +2308,7 @@ check_for_codeberg_release() {
local clean_tags=()
for t in "${raw_tags[@]}"; do
# Only strip leading 'v' when followed by a digit (e.g. v1.2.3)
if [[ "$t" =~ ^v[0-9] ]]; then
clean_tags+=("${t:1}")
else
clean_tags+=("$t")
fi
clean_tags+=("${t#v}")
done
local latest_raw="${raw_tags[0]}"
@@ -3159,10 +3138,7 @@ function fetch_and_deploy_gh_release() {
local json tag_name
json=$(</tmp/gh_rel.json)
tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty')
# Only strip leading 'v' when followed by a digit (e.g. v1.2.3), not words like "version/..."
[[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
# Sanitize version for use in filenames (replace / with -)
local version_safe="${version//\//-}"
[[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name"
if [[ "$current_version" == "$version" ]]; then
$STD msg_ok "$app is already up-to-date (v$version)"
@@ -3183,7 +3159,7 @@ function fetch_and_deploy_gh_release() {
# GitHub API's tarball_url/zipball_url can return HTTP 300 Multiple Choices
# when a branch and tag share the same name. Use explicit refs/tags/ URL instead.
local direct_tarball_url="https://github.com/$repo/archive/refs/tags/$tag_name.tar.gz"
filename="${app_lc}-${version_safe}.tar.gz"
filename="${app_lc}-${version}.tar.gz"
curl $download_timeout -fsSL -o "$tmpdir/$filename" "$direct_tarball_url" || {
msg_error "Download failed: $direct_tarball_url"
@@ -5153,7 +5129,7 @@ current_ip="$(get_current_ip)"
if [[ -z "$current_ip" ]]; then
echo "[ERROR] Could not detect local IP" >&2
exit 123
exit 1
fi
if [[ -f "$IP_FILE" ]]; then
@@ -5654,20 +5630,20 @@ function setup_mongodb() {
# - Handles Debian Trixie libaio1t64 transition
#
# Variables:
# USE_MYSQL_REPO - Use official MySQL repository (default: true)
# Set to "false" to use distro packages instead
# USE_MYSQL_REPO - Set to "true" to use official MySQL repository
# (default: false, uses distro packages)
# MYSQL_VERSION - MySQL version to install when using official repo
# (e.g. 8.0, 8.4) (default: 8.0)
#
# Examples:
# setup_mysql # Uses official MySQL repo, 8.0
# MYSQL_VERSION="8.4" setup_mysql # Specific version from MySQL repo
# USE_MYSQL_REPO=false setup_mysql # Uses distro package instead
# setup_mysql # Uses distro package (recommended)
# USE_MYSQL_REPO=true setup_mysql # Uses official MySQL repo
# USE_MYSQL_REPO=true MYSQL_VERSION="8.4" setup_mysql # Specific version
# ------------------------------------------------------------------------------
function setup_mysql() {
local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
local USE_MYSQL_REPO="${USE_MYSQL_REPO:-true}"
local USE_MYSQL_REPO="${USE_MYSQL_REPO:-false}"
local DISTRO_ID DISTRO_CODENAME
DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
@@ -6368,21 +6344,21 @@ EOF
# - Restores dumped data post-upgrade
#
# Variables:
# USE_PGDG_REPO - Use official PGDG repository (default: true)
# Set to "false" to use distro packages instead
# USE_PGDG_REPO - Set to "true" to use official PGDG repository
# (default: false, uses distro packages)
# PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16)
# PG_MODULES - Comma-separated list of modules (e.g. "postgis,contrib")
#
# Examples:
# setup_postgresql # Uses PGDG repo, PG 16
# PG_VERSION="17" setup_postgresql # Specific version from PGDG
# USE_PGDG_REPO=false setup_postgresql # Uses distro package instead
# setup_postgresql # Uses distro package (recommended)
# USE_PGDG_REPO=true setup_postgresql # Uses official PGDG repo
# USE_PGDG_REPO=true PG_VERSION="17" setup_postgresql # Specific version from PGDG
# ------------------------------------------------------------------------------
function setup_postgresql() {
local PG_VERSION="${PG_VERSION:-16}"
local PG_MODULES="${PG_MODULES:-}"
local USE_PGDG_REPO="${USE_PGDG_REPO:-true}"
local USE_PGDG_REPO="${USE_PGDG_REPO:-false}"
local DISTRO_ID DISTRO_CODENAME
DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

View File

@@ -244,7 +244,7 @@ curl_handler() {
if [[ -z "$url" ]]; then
msg_error "no valid url or option entered for curl_handler"
exit 64
exit 1
fi
$STD msg_info "Fetching: $url"
@@ -273,7 +273,7 @@ curl_handler() {
rm -f /tmp/curl_error.log
fi
__curl_err_handler "$exit_code" "$url" "$curl_stderr"
exit "$exit_code"
exit 1 # hard exit if exit_code is not 0
fi
$STD printf "\r\033[K${INFO}${YW}Retry $attempt/$max_retries in ${delay}s...${CL}" >&2
@@ -316,7 +316,7 @@ __curl_err_handler() {
esac
[[ -n "$curl_msg" ]] && printf "%s\n" "$curl_msg" >&2
exit "$exit_code"
exit 1
}
# ------------------------------------------------------------------------------
@@ -331,7 +331,7 @@ shell_check() {
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
echo -e "\nExiting..."
sleep 2
exit 103
exit
fi
}
@@ -352,11 +352,11 @@ clear_line() {
#
# - Determines if script should run in verbose mode
# - Checks VERBOSE and var_verbose variables
# - Note: Non-TTY (pipe) scenarios are handled separately in msg_info()
# - Also returns true if not running in TTY (pipe/redirect scenario)
# ------------------------------------------------------------------------------
is_verbose_mode() {
local verbose="${VERBOSE:-${var_verbose:-no}}"
[[ "$verbose" != "no" ]]
[[ "$verbose" != "no" || ! -t 2 ]]
}
### dev spinner ###
@@ -552,7 +552,7 @@ check_root() {
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit 104
exit
fi
}
@@ -562,7 +562,7 @@ pve_check() {
echo -e "Requires Proxmox Virtual Environment Version 8.1 - 8.4 or 9.0 - 9.1."
echo -e "Exiting..."
sleep 2
exit 105
exit
fi
}
@@ -572,21 +572,21 @@ arch_check() {
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
echo -e "Exiting..."
sleep 2
exit 106
exit
fi
}
exit_script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit 0
exit
}
check_hostname_conflict() {
local hostname="$1"
if qm list | awk '{print $2}' | grep -qx "$hostname"; then
msg_error "Hostname $hostname already in use by another VM."
exit 206
exit 1
fi
}

View File

@@ -73,7 +73,7 @@ fi
DISTRO=$(pct exec "$CTID" -- cat /etc/os-release | grep -w "ID" | cut -d'=' -f2 | tr -d '"')
if [[ "$DISTRO" != "debian" && "$DISTRO" != "ubuntu" ]]; then
msg "\e[1;31m Error: This script only supports Debian or Ubuntu LXC containers. Detected: $DISTRO. Aborting...\e[0m"
exit 238
exit 1
fi
CTID_CONFIG_PATH=/etc/pve/lxc/${CTID}.conf

View File

@@ -32,7 +32,7 @@ header_info
if ! command -v pveversion &>/dev/null; then
msg_error "This script must be run on the Proxmox VE host (not inside an LXC container)"
exit 232
exit 1
fi
while true; do
@@ -64,7 +64,7 @@ while [[ -z "${CTID}" ]]; do
CTID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Containers on $NODE" --radiolist \
"\nSelect a container to add Tailscale to:\n" \
16 $((MSG_MAX_LENGTH + 23)) 6 \
"${CTID_MENU[@]}" 3>&1 1>&2 2>&3) || exit 0
"${CTID_MENU[@]}" 3>&1 1>&2 2>&3) || exit 1
done
CTID_CONFIG_PATH="/etc/pve/lxc/${CTID}.conf"
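# Illustrative sketch (menu entries hypothetical): the 3>&1 1>&2 2>&3 idiom
# above — whiptail draws its UI on stdout and prints the answer on stderr, so
# the descriptors are swapped to capture the answer while the UI reaches the tty.
CHOICE=$(whiptail --menu "Pick one" 12 40 2 a "first" b "second" 3>&1 1>&2 2>&3)
echo "selected: $CHOICE"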

View File

@@ -69,7 +69,7 @@ elif [[ -f "/etc/debian_version" ]]; then
SERVICE_PATH="/etc/systemd/system/adguardhome-sync.service"
else
msg_error "Unsupported OS detected. Exiting."
exit 238
exit 1
fi
# ==============================================================================
@@ -312,7 +312,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -87,11 +87,11 @@ function update() {
function check_docker() {
if ! command -v docker &>/dev/null; then
msg_error "Docker is not installed. This script requires an existing Docker LXC. Exiting."
exit 10
exit 1
fi
if ! docker compose version &>/dev/null; then
msg_error "Docker Compose plugin is not available. Please install it before running this script. Exiting."
exit 10
exit 1
fi
msg_ok "Docker $(docker --version | cut -d' ' -f3 | tr -d ',') and Docker Compose are available"
}
@@ -171,7 +171,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -93,7 +93,7 @@ function check_or_install_docker() {
msg_ok "Docker Compose is available"
else
msg_error "Docker Compose plugin is not available. Please install it."
exit 10
exit 1
fi
return
fi
@@ -103,7 +103,7 @@ function check_or_install_docker() {
read -r install_docker_prompt
if [[ ! "${install_docker_prompt,,}" =~ ^(y|yes)$ ]]; then
msg_error "Docker is required for ${APP}. Exiting."
exit 10
exit 1
fi
msg_info "Installing Docker"
@@ -165,7 +165,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -49,7 +49,7 @@ elif grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
SERVICE_PATH="/etc/systemd/system/copyparty.service"
else
msg_error "Unsupported OS detected. Exiting."
exit 238
exit 1
fi
# ==============================================================================
@@ -318,7 +318,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -51,7 +51,7 @@ EOF
# ==============================================================================
if ! grep -qE 'ID=debian|ID=ubuntu' /etc/os-release 2>/dev/null; then
echo -e "${CROSS} Unsupported OS detected. This script only supports Debian and Ubuntu."
exit 238
exit 1
fi
# ==============================================================================
@@ -183,7 +183,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -99,7 +99,7 @@ function check_or_install_docker() {
msg_ok "Docker Compose is available"
else
msg_error "Docker Compose plugin is not available. Please install it."
exit 10
exit 1
fi
return
fi
@@ -109,7 +109,7 @@ function check_or_install_docker() {
read -r install_docker_prompt
if [[ ! "${install_docker_prompt,,}" =~ ^(y|yes)$ ]]; then
msg_error "Docker is required for ${APP}. Exiting."
exit 10
exit 1
fi
msg_info "Installing Docker"
@@ -160,7 +160,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -92,7 +92,7 @@ function check_or_install_docker() {
msg_ok "Docker Compose is available"
else
msg_error "Docker Compose plugin is not available. Please install it."
exit 10
exit 1
fi
return
fi
@@ -102,7 +102,7 @@ function check_or_install_docker() {
read -r install_docker_prompt
if [[ ! "${install_docker_prompt,,}" =~ ^(y|yes)$ ]]; then
msg_error "Docker is required for ${APP}. Exiting."
exit 10
exit 1
fi
msg_info "Installing Docker"
@@ -163,7 +163,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -54,7 +54,7 @@ elif [[ -f "/etc/debian_version" ]]; then
PKG_MANAGER="apt-get install -y"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
header_info

View File

@@ -52,7 +52,7 @@ elif [[ -f "/etc/debian_version" ]]; then
PKG_MANAGER="apt-get install -y"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
header_info

View File

@@ -93,7 +93,7 @@ EOF
update_glances_debian() {
if [[ ! -d /opt/glances/.venv ]]; then
msg_error "$APP is not installed"
exit 233
exit 1
fi
msg_info "Updating $APP"
cd /opt/glances
@@ -160,7 +160,7 @@ EOF
update_glances_alpine() {
if [[ ! -d /opt/glances/.venv ]]; then
msg_error "$APP is not installed"
exit 233
exit 1
fi
msg_info "Updating $APP"
cd /opt/glances

View File

@@ -52,13 +52,13 @@ EOF
# ==============================================================================
if [[ -f "/etc/alpine-release" ]]; then
msg_error "Alpine is not supported for ${APP}. Use Debian."
exit 238
exit 1
elif [[ -f "/etc/debian_version" ]]; then
OS="Debian"
SERVICE_PATH="/etc/systemd/system/immich-proxy.service"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
# ==============================================================================
@@ -231,7 +231,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -52,13 +52,13 @@ EOF
# ==============================================================================
if [[ -f "/etc/alpine-release" ]]; then
msg_error "Alpine is not supported for ${APP}. Use Debian/Ubuntu."
exit 238
exit 1
elif [[ -f "/etc/debian_version" ]]; then
OS="Debian"
SERVICE_PATH="/etc/systemd/system/jellystat.service"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
# ==============================================================================
@@ -326,7 +326,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -37,7 +37,7 @@ function find_compose_file() {
COMPOSE_FILE=$(find "$INSTALL_PATH" -maxdepth 1 -type f -name '*.compose.yaml' ! -name 'compose.env' | head -n1)
if [[ -z "${COMPOSE_FILE:-}" ]]; then
msg_error "No valid compose file found in ${INSTALL_PATH}!"
exit 233
exit 1
fi
COMPOSE_BASENAME=$(basename "$COMPOSE_FILE")
}
@@ -48,7 +48,7 @@ function check_legacy_db() {
echo -e "${YW}This configuration is no longer supported since Komodo v1.18.0.${CL}"
echo -e "${YW}Please follow the migration guide:${CL}"
echo -e "${BGN}https://github.com/community-scripts/ProxmoxVE/discussions/5689${CL}\n"
exit 238
exit 1
fi
}
@@ -79,14 +79,14 @@ function update() {
BACKUP_FILE="${INSTALL_PATH}/${COMPOSE_BASENAME}.bak_$(date +%Y%m%d_%H%M%S)"
cp "$COMPOSE_FILE" "$BACKUP_FILE" || {
msg_error "Failed to create backup of ${COMPOSE_BASENAME}!"
exit 235
exit 1
}
GITHUB_URL="https://raw.githubusercontent.com/moghtech/komodo/main/compose/${COMPOSE_BASENAME}"
if ! curl -fsSL "$GITHUB_URL" -o "$COMPOSE_FILE"; then
msg_error "Failed to download ${COMPOSE_BASENAME} from GitHub!"
mv "$BACKUP_FILE" "$COMPOSE_FILE"
exit 115
exit 1
fi
if ! grep -qxF 'COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups' "$COMPOSE_ENV"; then
@@ -129,7 +129,7 @@ function check_or_install_docker() {
msg_ok "Docker Compose is available"
else
msg_error "Docker Compose plugin is not available. Please install it."
exit 10
exit 1
fi
return
fi
@@ -139,7 +139,7 @@ function check_or_install_docker() {
read -r install_docker_prompt
if [[ ! "${install_docker_prompt,,}" =~ ^(y|yes)$ ]]; then
msg_error "Docker is required for ${APP}. Exiting."
exit 10
exit 1
fi
msg_info "Installing Docker"
@@ -239,7 +239,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -58,7 +58,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -69,7 +69,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not yet supported."
msg_error "Supported: Proxmox VE version 9.09.1.x"
exit 105
exit 1
fi
return 0
fi
@@ -77,19 +77,19 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.9 or 9.09.1.x"
exit 105
exit 1
}
detect_codename() {
source /etc/os-release
if [[ "$ID" != "debian" ]]; then
msg_error "Unsupported base OS: $ID (only Proxmox VE / Debian supported)."
exit 238
exit 1
fi
CODENAME="${VERSION_CODENAME:-}"
if [[ -z "$CODENAME" ]]; then
msg_error "Could not detect Debian codename."
exit 71
exit 1
fi
echo "$CODENAME"
}
@@ -124,7 +124,7 @@ install() {
PKG=$(get_latest_repo_pkg "$REPO_URL")
if [[ -z "$PKG" ]]; then
msg_error "Could not find netdata-repo package for Debian $CODENAME"
exit 237
exit 1
fi
curl -fsSL "${REPO_URL}${PKG}" -o "$PKG"
$STD dpkg -i "$PKG"

View File

@@ -36,7 +36,7 @@ SERVICE_PATH="/etc/systemd/system/nextcloud-exporter.service"
# ==============================================================================
if ! grep -qE 'ID=debian|ID=ubuntu' /etc/os-release 2>/dev/null; then
echo -e "${CROSS} Unsupported OS detected. This script only supports Debian and Ubuntu."
exit 238
exit 1
fi
# ==============================================================================
@@ -170,7 +170,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "Nextcloud-Exporter is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -52,7 +52,7 @@ elif [[ -f "/etc/debian_version" ]]; then
INSTALL_DIR="$INSTALL_DIR_DEBIAN"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
header_info
@@ -72,7 +72,7 @@ function check_internet() {
msg_ok "Internet connectivity OK"
else
msg_error "Internet connectivity or GitHub unreachable (Status $HTTP_CODE). Exiting."
exit 115
exit 1
fi
}
@@ -105,7 +105,7 @@ function install_php_and_modules() {
msg_info "Installing missing PHP packages: ${MISSING_PACKAGES[*]}"
if ! apt-get update &>/dev/null || ! apt-get install -y "${MISSING_PACKAGES[@]}" &>/dev/null; then
msg_error "Failed to install required PHP modules. Exiting."
exit 237
exit 1
fi
msg_ok "Installed missing PHP packages"
else
@@ -132,7 +132,7 @@ function install_phpmyadmin() {
msg_info "Downloading ${TARBALL_URL}"
if ! curl -fsSL "$TARBALL_URL" -o /tmp/phpmyadmin.tar.gz; then
msg_error "Download failed: $TARBALL_URL"
exit 115
exit 1
fi
mkdir -p "$INSTALL_DIR"
@@ -188,7 +188,7 @@ EOF
msg_ok "Started PHP-FPM service: $PHP_FPM_SERVICE"
else
msg_error "Failed to start PHP-FPM service: $PHP_FPM_SERVICE"
exit 150
exit 1
fi
$STD rc-service lighttpd start
@@ -237,7 +237,7 @@ function update_phpmyadmin() {
if ! curl -fsSL "$TARBALL_URL" -o /tmp/phpmyadmin.tar.gz; then
msg_error "Download failed: $TARBALL_URL"
exit 115
exit 1
fi
BACKUP_DIR="/tmp/phpmyadmin-backup-$(date +%Y%m%d-%H%M%S)"
@@ -280,7 +280,7 @@ if is_phpmyadmin_installed; then
;;
*)
echo -e "${YW}⚠️ Invalid input. Exiting.${CL}"
exit 112
exit 1
;;
esac
else

View File

@@ -41,7 +41,7 @@ elif grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
SERVICE_PATH="/etc/systemd/system/pihole-exporter.service"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
# ==============================================================================
@@ -207,7 +207,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "Pihole-Exporter is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -32,7 +32,7 @@ AUTH_TOKEN_FILE="/etc/prometheus-paperless-ngx-exporter/paperless_auth_token_fil
# ==============================================================================
if ! grep -qE 'ID=debian|ID=ubuntu' /etc/os-release 2>/dev/null; then
echo -e "${CROSS} Unsupported OS detected. This script only supports Debian and Ubuntu."
exit 238
exit 1
fi
# ==============================================================================
@@ -144,7 +144,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "Prometheus-Paperless-NGX-Exporter is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -41,7 +41,7 @@ elif grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
SERVICE_PATH="/etc/systemd/system/qbittorrent-exporter.service"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 238
exit 1
fi
# ==============================================================================
@@ -200,7 +200,7 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "qBittorrent-Exporter is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi

View File

@@ -62,7 +62,7 @@ function check_or_install_docker() {
msg_ok "Docker Compose is available"
else
msg_error "Docker Compose plugin is not available. Please install it."
exit 10
exit 1
fi
return
fi
@@ -72,7 +72,7 @@ function check_or_install_docker() {
read -r install_docker_prompt
if [[ ! "${install_docker_prompt,,}" =~ ^(y|yes)$ ]]; then
msg_error "Docker is required for ${APP}. Exiting."
exit 10
exit 1
fi
msg_info "Installing Docker"
@@ -168,14 +168,14 @@ if [[ "${type:-}" == "update" ]]; then
update
else
msg_error "${APP} is not installed. Nothing to update."
exit 233
exit 1
fi
exit 0
fi
if [[ -f /etc/alpine-release ]]; then
msg_error "${APP} does not support Alpine Linux. Please use a Debian or Ubuntu based LXC."
exit 238
exit 1
fi
header_info

View File

@@ -62,7 +62,7 @@ function msg_error() {
}
if [ -z "$(ls -A /var/lib/docker/volumes/hass_config/_data/backups/)" ]; then
msg_error "No backups found! \n"
exit 235
exit 1
fi
DIR=/var/lib/docker/volumes/hass_config/_data/restore
if [ -d "$DIR" ]; then

View File

@@ -62,7 +62,7 @@ function msg_error() {
}
if [ -z "$(ls -A /root/.homeassistant/backups/)" ]; then
msg_error "No backups found! \n"
exit 235
exit 1
fi
DIR=/root/.homeassistant/restore
if [ -d "$DIR" ]; then

View File

@@ -39,7 +39,7 @@ ROOT_FS=$(df -Th "/" | awk 'NR==2 {print $2}')
if [ "$ROOT_FS" != "ext4" ]; then
whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "Warning" \
--yesno "Root filesystem is not ext4 ($ROOT_FS).\nContinue anyway?" 12 80 || exit 0
--yesno "Root filesystem is not ext4 ($ROOT_FS).\nContinue anyway?" 12 80 || exit 1
fi
NODE=$(hostname)

View File

@@ -57,7 +57,7 @@ done
if [ ${#kernels_to_remove[@]} -eq 0 ]; then
echo -e "${RD}No valid selection made. Exiting.${CL}"
exit 0
exit 1
fi
# Confirm removal
@@ -66,7 +66,7 @@ printf "%s\n" "${kernels_to_remove[@]}"
read -rp "Proceed with removal? (y/n): " confirm
if [[ "$confirm" != "y" ]]; then
echo -e "${RD}Aborted.${CL}"
exit 0
exit 1
fi
# Remove kernels

View File

@@ -51,7 +51,7 @@ containers=$(pct list | tail -n +2 | awk '{print $0 " " $4}')
if [ -z "$containers" ]; then
whiptail --title "LXC Container Delete" --msgbox "No LXC containers available!" 10 60
exit 234
exit 1
fi
menu_items=("ALL" "Delete ALL containers" "OFF") # Add as first option
@@ -72,7 +72,7 @@ CHOICES=$(whiptail --title "LXC Container Delete" \
if [ -z "$CHOICES" ]; then
whiptail --title "LXC Container Delete" \
--msgbox "No containers selected!" 10 60
exit 0
exit 1
fi
read -p "Delete containers manually or automatically? (Default: manual) m/a: " DELETE_MODE

View File

@@ -47,7 +47,7 @@ function msg_warn() { echo -e "${WARN} ${YWB}${1}"; }
# Check for root privileges
if [ "$(id -u)" -ne 0 ]; then
msg_error "Error: This script must be run as root."
exit 104
exit 1
fi
if ! command -v ethtool >/dev/null 2>&1; then
@@ -55,7 +55,7 @@ if ! command -v ethtool >/dev/null 2>&1; then
apt-get update &>/dev/null
apt-get install -y ethtool &>/dev/null || {
msg_error "Failed to install ethtool. Exiting."
exit 237
exit 1
}
msg_ok "ethtool installed successfully"
fi
@@ -86,7 +86,7 @@ done
if [ ${#INTERFACES[@]} -eq 0 ]; then
whiptail --title "Error" --msgbox "No Intel e1000e or e1000 network interfaces found!" 10 60
msg_error "No Intel e1000e or e1000 network interfaces found! Exiting."
exit 236
exit 1
fi
msg_ok "Found ${BL}$COUNT${GN} Intel e1000e/e1000 interfaces"

View File

@@ -41,7 +41,7 @@ header_info
virt=$(systemd-detect-virt)
if [ "$virt" != "none" ]; then
msg_error "This script must be run on bare metal. Detected virtual environment: $virt"
exit 232
exit 1
fi
# Attempt to obtain the current loaded microcode revision

View File

@@ -83,7 +83,7 @@ main() {
if command -v pveversion >/dev/null 2>&1; then
echo -e "\n🛑 PVE Detected, Wrong Script!\n"
exit 232
exit 1
fi
local CODENAME
@@ -95,7 +95,7 @@ main() {
*)
msg_error "Unsupported Debian codename: $CODENAME"
echo -e "Supported: bookworm (PBS 3.x) and trixie (PBS 4.x)"
exit 105
exit 1
;;
esac
}

View File

@@ -49,7 +49,7 @@ declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "post-pmg-inst
if ! grep -q "Proxmox Mail Gateway" /etc/issue 2>/dev/null; then
msg_error "This script is only intended for Proxmox Mail Gateway"
exit 232
exit 1
fi
repo_state() {

View File

@@ -88,19 +88,19 @@ main() {
if [[ "$PVE_MAJOR" == "8" ]]; then
if ((PVE_MINOR < 0 || PVE_MINOR > 9)); then
msg_error "Unsupported Proxmox 8 version"
exit 105
exit 1
fi
start_routines_8
elif [[ "$PVE_MAJOR" == "9" ]]; then
if ((PVE_MINOR < 0 || PVE_MINOR > 1)); then
msg_error "Only Proxmox 9.0-9.1.x is currently supported"
exit 105
exit 1
fi
start_routines_9
else
msg_error "Unsupported Proxmox VE major version: $PVE_MAJOR"
echo -e "Supported: 8.08.9.x and 9.09.1.x"
exit 105
exit 1
fi
}

View File

@@ -25,7 +25,7 @@ header_info "$APP"
check_root() {
if [[ $EUID -ne 0 ]]; then
msg_error "Script must be run as root"
exit 104
exit 1
fi
}
@@ -63,7 +63,7 @@ select_container() {
if [[ ${#lxc_list[@]} -eq 0 ]]; then
msg_error "No containers found"
exit 234
exit 1
fi
PS3="Enter number of container to convert: "
@@ -101,7 +101,7 @@ backup_container() {
if [ -z "$BACKUP_PATH" ] || ! grep -q "Backup job finished successfully" "$vzdump_output"; then
rm "$vzdump_output"
msg_error "Backup failed"
exit 235
exit 1
fi
rm "$vzdump_output"
msg_ok "Backup complete: $BACKUP_PATH"
@@ -126,7 +126,7 @@ perform_conversion() {
msg_ok "Conversion successful"
else
msg_error "Conversion failed"
exit 235
exit 1
fi
}

View File

@@ -140,7 +140,7 @@ function backup_container() {
msg_ok "Backup created"
else
msg_error "Backup failed for container $1"
exit 235
exit 1
fi
}
@@ -183,7 +183,7 @@ containers=$(pct list | tail -n +2 | awk '{print $0 " " $4}')
if [ -z "$containers" ]; then
whiptail --title "LXC Container Update" --msgbox "No LXC containers available!" 10 60
exit 234
exit 1
fi
menu_items=()
@@ -242,7 +242,7 @@ if [[ -n "$var_container" ]]; then
if [[ -z "$CHOICE" ]]; then
msg_error "No containers matched the selection criteria: $var_container ${var_tags:-community-script|proxmox-helper-scripts}"
exit 234
exit 1
fi
msg_ok "Selected containers: $CHOICE"
else
@@ -253,7 +253,7 @@ else
if [ -z "$CHOICE" ]; then
whiptail --title "LXC Container Update" \
--msgbox "No containers selected!" 10 60
exit 0
exit 1
fi
fi
@@ -284,7 +284,7 @@ if [ "$BACKUP_CHOICE" == "yes" ]; then
if [ -z "$STORAGES" ]; then
msg_error "No storage with 'backup' support found!"
exit 119
exit 1
fi
# Determine storage based on var_backup_storage
@@ -296,7 +296,7 @@ if [ "$BACKUP_CHOICE" == "yes" ]; then
else
msg_error "Specified backup storage '$var_backup_storage' not found or doesn't support backups!"
msg_info "Available storages: $(echo $STORAGES | tr '\n' ' ')"
exit 119
exit 1
fi
else
MENU_ITEMS=()
@@ -308,7 +308,7 @@ if [ "$BACKUP_CHOICE" == "yes" ]; then
if [ -z "$STORAGE_CHOICE" ]; then
msg_error "No storage selected!"
exit 0
exit 1
fi
fi
fi
@@ -436,11 +436,11 @@ for container in $CHOICE; do
msg_ok "Restored LXC from backup"
else
msg_error "Restored LXC from backup failed"
exit 235
exit 1
fi
else
msg_error "Update failed for container $container. Exiting"
exit "$exit_code"
exit 1
fi
done

View File

@@ -158,7 +158,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -169,7 +169,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 9.0 9.1"
exit 105
exit 1
fi
return 0
fi
@@ -177,7 +177,7 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.x or 9.0 9.1"
exit 105
exit 1
}
function arch_check() {
@@ -513,7 +513,7 @@ DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully impo
[[ -z "$DISK_REF_IMPORTED" ]] && {
msg_error "Unable to determine imported disk reference."
echo "$IMPORT_OUT"
exit 226
exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"

View File

@@ -158,7 +158,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -169,7 +169,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 9.0 9.1"
exit 105
exit 1
fi
return 0
fi
@@ -177,7 +177,7 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.x or 9.0 9.1"
exit 105
exit 1
}
function arch_check() {

View File

@@ -158,7 +158,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -169,7 +169,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 9.0 9.1"
exit 105
exit 1
fi
return 0
fi
@@ -177,7 +177,7 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.x or 9.0 9.1"
exit 105
exit 1
}
function arch_check() {

View File

@@ -638,7 +638,7 @@ DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully impo
[[ -z "$DISK_REF_IMPORTED" ]] && {
msg_error "Unable to determine imported disk reference."
echo "$IMPORT_OUT"
exit 226
exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"

View File

@@ -163,7 +163,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -174,7 +174,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 9.0 9.1"
exit 105
exit 1
fi
return 0
fi
@@ -182,7 +182,7 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.x or 9.0 9.1"
exit 105
exit 1
}
function arch_check() {
@@ -221,7 +221,7 @@ function ensure_pv() {
if ! apt-get update -qq &>/dev/null || ! apt-get install -y pv &>/dev/null; then
msg_error "Failed to install pv automatically."
echo -e "\nPlease run manually on the Proxmox host:\n apt install pv\n"
exit 237
exit 1
fi
msg_ok "Installed pv"
fi
@@ -249,14 +249,14 @@ function download_and_validate_xz() {
if ! curl -fSL -o "$file" "$url"; then
msg_error "Download failed: $url"
rm -f "$file"
exit 115
exit 1
fi
# Validate again
if ! xz -t "$file" &>/dev/null; then
msg_error "Downloaded file $(basename "$file") is corrupted. Please try again later."
rm -f "$file"
exit 115
exit 1
fi
msg_ok "Downloaded and validated $(basename "$file")"
}
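# Illustrative usage sketch (filename hypothetical): xz -t test-decompresses
# without writing output, which is what makes the cheap validity check above work.
xz -t disk.img.xz && echo "archive intact"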
@@ -272,7 +272,7 @@ function extract_xz_with_pv() {
if ! xz -dc "$file" | pv -N "Extracting" >"$target"; then
msg_error "Failed to extract $file"
rm -f "$target"
exit 115
exit 1
fi
msg_ok "Decompressed to $target"
}
@@ -592,7 +592,7 @@ DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk
[[ -z "$DISK_REF" ]] && {
msg_error "Unable to determine imported disk reference."
echo "$IMPORT_OUT"
exit 226
exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF}${CL})"

View File

@@ -159,7 +159,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0 8.9"
exit 105
exit 1
fi
return 0
fi
@@ -170,7 +170,7 @@ pve_check() {
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 9.0 9.1"
exit 105
exit 1
fi
return 0
fi
@@ -178,7 +178,7 @@ pve_check() {
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0 8.x or 9.0 9.1"
exit 105
exit 1
}
function arch_check() {

Some files were not shown because too many files have changed in this diff.