Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| CanbiZ (MickLesk) | 62ce92e8d2 | fix(dispatcharr): migrate from requirements.txt to uv sync (pyproject.toml) | 2026-02-12 13:09:49 +01:00 |
46 changed files with 324 additions and 864 deletions
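The dispatcharr install script itself is not among the file diffs shown below, so the following is only an illustrative sketch of the requirements.txt → `uv sync` pattern the commit message describes; the paths, venv location, and the `manage.py` entrypoint are assumptions, not taken from the actual script.

```bash
# Hypothetical sketch of the migration pattern named in the commit message.
# Before: dependencies pinned in requirements.txt, installed into a hand-made venv
python3 -m venv /opt/dispatcharr/env
/opt/dispatcharr/env/bin/pip install -r /opt/dispatcharr/requirements.txt

# After: dependencies declared in pyproject.toml, resolved and installed by uv
cd /opt/dispatcharr
uv sync                              # creates/updates .venv from pyproject.toml (and uv.lock, if present)
uv run python manage.py migrate      # example of running a project command inside the uv-managed venv
```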

View File

@@ -401,68 +401,20 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-02-13
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874))
- Planka: add migrate step to update function [@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877))
- Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868))
- [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864))
- #### 🔧 Refactor
- chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872))
- Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850))
### 📡 API
- #### ✨ New Features
- error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875))
### 🌐 Website
- #### 📝 Script Information
- SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829))
## 2026-02-12
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844))
- Increased the Grafana container default disk size. [@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840))
- Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
- Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833))
- Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831))
- #### ✨ New Features
- Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842))
- Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810))
### 💾 Core
- #### ✨ New Features
- tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841))
- core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822))
- #### 🔧 Refactor
- error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845))
### 🧰 Tools
- #### 🐞 Bug Fixes
- Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837))
## 2026-02-11
### 🆕 New Scripts

View File

@@ -9,7 +9,7 @@ APP="Alpine-Grafana"
var_tags="${var_tags:-alpine;monitoring}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-2}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.23}"
var_unprivileged="${var_unprivileged:-1}"

View File

@@ -28,7 +28,6 @@ function update_script() {
exit
fi
msg_info "Updating Deluge"
ensure_dependencies python3-setuptools
$STD apt update
$STD pip3 install deluge[all] --upgrade
msg_ok "Updated Deluge"

View File

@@ -42,8 +42,7 @@ function update_script() {
msg_info "Restoring Configurations"
mv /opt/selfhosted.yaml /opt/donetick/config
grep -q 'http://localhost"$' /opt/donetick/config/selfhosted.yaml || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' /opt/donetick/config/selfhosted.yaml
grep -q 'capacitor://localhost' /opt/donetick/config/selfhosted.yaml || sed -i '/http:\/\/localhost"$/a\ - "capacitor://localhost"' /opt/donetick/config/selfhosted.yaml
sed -i '/capacitor:\/\/localhost/d' /opt/donetick/config/selfhosted.yaml
mv /opt/donetick.db /opt/donetick
msg_ok "Restored Configurations"

View File

@@ -9,7 +9,7 @@ APP="EMQX"
var_tags="${var_tags:-mqtt}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-6}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVE/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Authors: MickLesk (CanbiZ) | Co-Author: remz1337
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 tteck
# Authors: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/
@@ -11,7 +11,7 @@ var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_version="${var_version:-11}"
var_unprivileged="${var_unprivileged:-0}"
var_gpu="${var_gpu:-yes}"
@@ -21,15 +21,15 @@ color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/systemd/system/frigate.service ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_error "To update Frigate, create a new container and transfer your configuration."
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/systemd/system/frigate.service ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_error "To update Frigate, create a new container and transfer your configuration."
exit
}
start

View File

@@ -9,7 +9,7 @@ APP="Grafana"
var_tags="${var_tags:-monitoring;visualization}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-4}"
var_disk="${var_disk:-2}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

View File

@@ -46,7 +46,7 @@ function update_script() {
msg_info "Restoring configuration & data"
mv /opt/app.env /opt/jotty/.env
[[ -d /opt/data ]] && mv /opt/data /opt/jotty/data
[[ -d /opt/jotty/config ]] && cp -a /opt/config/* /opt/jotty/config && rm -rf /opt/config
[[ -d /opt/jotty/config ]] && mv /opt/config/* /opt/jotty/config
msg_ok "Restored configuration & data"
msg_info "Starting Service"

View File

@@ -44,7 +44,7 @@ function update_script() {
msg_info "Installing uv-based Open-WebUI"
PYTHON_VERSION="3.12" setup_uv
$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
$STD uv tool install --python 3.12 open-webui[all]
msg_ok "Installed uv-based Open-WebUI"
msg_info "Restoring data"
@@ -126,7 +126,7 @@ EOF
msg_info "Updating Open WebUI via uv"
PYTHON_VERSION="3.12" setup_uv
$STD uv tool install --force --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
$STD uv tool upgrade --python 3.12 open-webui[all]
systemctl restart open-webui
msg_ok "Updated Open WebUI"
msg_ok "Updated successfully!"

View File

@@ -51,7 +51,7 @@ function update_script() {
$STD npm run db:generate
$STD npm run build
$STD npm run build:cli
$STD npm run db:push
$STD npm run db:sqlite:push
cp -R .next/standalone ./
chmod +x ./dist/cli.mjs
cp server/db/names.json ./dist/names.json

View File

@@ -61,12 +61,6 @@ function update_script() {
rm -rf "$BK"
msg_ok "Restored data"
msg_ok "Migrate Database"
cd /opt/planka
$STD npm run db:upgrade
$STD npm run db:migrate
msg_ok "Migrated Database"
msg_info "Starting Service"
systemctl start planka
msg_ok "Started Service"

View File

@@ -28,55 +28,16 @@ function update_script() {
exit
fi
if check_for_gh_release "Radicale" "Kozea/Radicale"; then
msg_info "Stopping service"
systemctl stop radicale
msg_ok "Stopped service"
msg_info "Updating ${APP}"
$STD python3 -m venv /opt/radicale
source /opt/radicale/bin/activate
$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
msg_ok "Updated ${APP}"
msg_info "Backing up users file"
cp /opt/radicale/users /opt/radicale_users_backup
msg_ok "Backed up users file"
PYTHON_VERSION="3.13" setup_uv
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
msg_info "Restoring users file"
rm -f /opt/radicale/users
mv /opt/radicale_users_backup /opt/radicale/users
msg_ok "Restored users file"
if grep -q 'start.sh' /etc/systemd/system/radicale.service; then
sed -i -e '/^Description/i[Unit]' \
-e '\|^ExecStart|iWorkingDirectory=/opt/radicale' \
-e 's|^ExecStart=.*|ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config|' /etc/systemd/system/radicale.service
systemctl daemon-reload
fi
if [[ ! -f /etc/radicale/config ]]; then
msg_info "Migrating to config file (/etc/radicale/config)"
mkdir -p /etc/radicale
cat <<EOF >/etc/radicale/config
[server]
hosts = 0.0.0.0:5232
[auth]
type = htpasswd
htpasswd_filename = /opt/radicale/users
htpasswd_encryption = sha512
[storage]
type = multifilesystem
filesystem_folder = /var/lib/radicale/collections
[web]
type = internal
EOF
msg_ok "Migrated to config (/etc/radicale/config)"
fi
msg_info "Starting service"
systemctl start radicale
msg_ok "Started service"
msg_ok "Updated Successfully!"
fi
msg_info "Starting Service"
systemctl enable -q --now radicale
msg_ok "Started Service"
msg_ok "Updated successfully!"
exit
}

View File

@@ -21,7 +21,7 @@
"resources": {
"cpu": 2,
"ram": 1024,
"hdd": 6,
"hdd": 4,
"os": "debian",
"version": "13"
}

View File

@@ -1,44 +0,0 @@
{
"name": "Frigate",
"slug": "frigate",
"categories": [
15
],
"date_created": "2026-02-13",
"type": "ct",
"updateable": false,
"privileged": false,
"config_path": "/config/config.yml",
"interface_port": 5000,
"documentation": "https://frigate.io/",
"website": "https://frigate.io/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/frigate-light.webp",
"description": "Frigate is a complete and local NVR (Network Video Recorder) with realtime AI object detection for CCTV cameras.",
"install_methods": [
{
"type": "default",
"script": "ct/frigate.sh",
"resources": {
"cpu": 4,
"ram": 4096,
"hdd": 20,
"os": "Debian",
"version": "12"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "SemanticSearch is not pre-installed due to high resource requirements (8+ cores, 16-24GB RAM, GPU recommended). Manual configuration required if needed.",
"type": "info"
},
{
"text": "OpenVino detector may fail on older CPUs (pre-Haswell/AVX2). If you encounter 'Illegal instruction' errors, consider using alternative detectors.",
"type": "warning"
}
]
}

View File

@@ -0,0 +1,44 @@
{
"name": "Frigate",
"slug": "frigate",
"categories": [
15
],
"date_created": "2024-05-02",
"type": "ct",
"updateable": false,
"privileged": true,
"interface_port": 5000,
"documentation": "https://docs.frigate.video/",
"website": "https://frigate.video/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/frigate.webp",
"config_path": "",
"description": "Frigate is an open source NVR built around real-time AI object detection. All processing is performed locally on your own hardware, and your camera feeds never leave your home.",
"install_methods": [
{
"type": "default",
"script": "ct/frigate.sh",
"resources": {
"cpu": 4,
"ram": 4096,
"hdd": 20,
"os": "debian",
"version": "11"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "Discussions (explore more advanced methods): `https://github.com/tteck/Proxmox/discussions/2711`",
"type": "info"
},
{
"text": "go2rtc Interface port:`1984`",
"type": "info"
}
]
}

View File

@@ -1,5 +1,5 @@
{
"generated": "2026-02-13T12:11:36Z",
"generated": "2026-02-12T06:25:40Z",
"versions": [
{
"slug": "2fauth",
@@ -193,9 +193,9 @@
{
"slug": "cleanuparr",
"repo": "Cleanuparr/Cleanuparr",
"version": "v2.6.1",
"version": "v2.5.1",
"pinned": false,
"date": "2026-02-13T10:00:19Z"
"date": "2026-01-11T00:46:17Z"
},
{
"slug": "cloudreve",
@@ -298,9 +298,9 @@
{
"slug": "donetick",
"repo": "donetick/donetick",
"version": "v0.1.73",
"version": "v0.1.71",
"pinned": false,
"date": "2026-02-12T23:42:30Z"
"date": "2026-02-11T06:01:13Z"
},
{
"slug": "drawio",
@@ -403,9 +403,9 @@
{
"slug": "ghostfolio",
"repo": "ghostfolio/ghostfolio",
"version": "2.238.0",
"version": "2.237.0",
"pinned": false,
"date": "2026-02-12T18:28:55Z"
"date": "2026-02-08T13:59:53Z"
},
{
"slug": "gitea",
@@ -543,9 +543,9 @@
{
"slug": "huntarr",
"repo": "plexguide/Huntarr.io",
"version": "9.2.4.1",
"version": "9.2.3",
"pinned": false,
"date": "2026-02-12T22:17:47Z"
"date": "2026-02-07T04:44:20Z"
},
{
"slug": "immich-public-proxy",
@@ -571,16 +571,16 @@
{
"slug": "invoiceninja",
"repo": "invoiceninja/invoiceninja",
"version": "v5.12.59",
"version": "v5.12.57",
"pinned": false,
"date": "2026-02-13T02:26:13Z"
"date": "2026-02-11T23:08:56Z"
},
{
"slug": "jackett",
"repo": "Jackett/Jackett",
"version": "v0.24.1103",
"version": "v0.24.1098",
"pinned": false,
"date": "2026-02-13T05:53:23Z"
"date": "2026-02-12T05:56:25Z"
},
{
"slug": "jellystat",
@@ -599,9 +599,9 @@
{
"slug": "jotty",
"repo": "fccview/jotty",
"version": "1.20.0",
"version": "1.19.1",
"pinned": false,
"date": "2026-02-12T09:23:30Z"
"date": "2026-01-26T21:30:39Z"
},
{
"slug": "kapowarr",
@@ -823,9 +823,9 @@
{
"slug": "metube",
"repo": "alexta69/metube",
"version": "2026.02.12",
"version": "2026.02.08",
"pinned": false,
"date": "2026-02-12T21:05:49Z"
"date": "2026-02-08T17:01:37Z"
},
{
"slug": "miniflux",
@@ -998,9 +998,9 @@
{
"slug": "pangolin",
"repo": "fosrl/pangolin",
"version": "1.15.4",
"version": "1.15.3",
"pinned": false,
"date": "2026-02-13T00:54:02Z"
"date": "2026-02-12T06:10:19Z"
},
{
"slug": "paperless-ai",
@@ -1026,9 +1026,9 @@
{
"slug": "patchmon",
"repo": "PatchMon/PatchMon",
"version": "v1.4.0",
"version": "v1.3.7",
"pinned": false,
"date": "2026-02-13T10:39:03Z"
"date": "2025-12-25T11:08:14Z"
},
{
"slug": "paymenter",
@@ -1219,13 +1219,6 @@
"pinned": false,
"date": "2025-11-16T22:39:01Z"
},
{
"slug": "radicale",
"repo": "Kozea/Radicale",
"version": "v3.6.0",
"pinned": false,
"date": "2026-01-10T06:56:46Z"
},
{
"slug": "rclone",
"repo": "rclone/rclone",
@@ -1299,9 +1292,9 @@
{
"slug": "scraparr",
"repo": "thecfu/scraparr",
"version": "v3.0.3",
"version": "v3.0.1",
"pinned": false,
"date": "2026-02-12T14:20:56Z"
"date": "2026-02-11T17:42:23Z"
},
{
"slug": "seelf",
@@ -1439,9 +1432,9 @@
{
"slug": "termix",
"repo": "Termix-SSH/Termix",
"version": "release-1.11.1-tag",
"version": "release-1.11.0-tag",
"pinned": false,
"date": "2026-02-13T04:49:16Z"
"date": "2026-01-25T02:09:52Z"
},
{
"slug": "the-lounge",
@@ -1467,9 +1460,9 @@
{
"slug": "tianji",
"repo": "msgbyte/tianji",
"version": "v1.31.12",
"version": "v1.31.10",
"pinned": false,
"date": "2026-02-12T19:06:14Z"
"date": "2026-02-04T17:21:04Z"
},
{
"slug": "traccar",
@@ -1558,9 +1551,9 @@
{
"slug": "upsnap",
"repo": "seriousm4x/UpSnap",
"version": "5.2.8",
"version": "5.2.7",
"pinned": false,
"date": "2026-02-13T00:02:37Z"
"date": "2026-01-07T23:48:00Z"
},
{
"slug": "uptimekuma",

View File

@@ -21,7 +21,7 @@
"resources": {
"cpu": 1,
"ram": 512,
"hdd": 4,
"hdd": 2,
"os": "debian",
"version": "13"
}
@@ -32,7 +32,7 @@
"resources": {
"cpu": 1,
"ram": 256,
"hdd": 2,
"hdd": 1,
"os": "alpine",
"version": "3.23"
}

View File

@@ -12,7 +12,7 @@
"documentation": "https://radicale.org/master.html#documentation-1",
"website": "https://radicale.org/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp",
"config_path": "/etc/radicale/config",
"config_path": "/etc/radicale/config or ~/.config/radicale/config",
"description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)",
"install_methods": [
{

View File

@@ -32,10 +32,6 @@
"password": null
},
"notes": [
{
"text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.",
"type": "warning"
},
{
"text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.",
"type": "info"

View File

@@ -16,8 +16,7 @@ update_os
msg_info "Installing Dependencies"
$STD apt install -y \
python3-pip \
python3-libtorrent \
python3-setuptools
python3-libtorrent
msg_ok "Installed Dependencies"
msg_info "Installing Deluge"

View File

@@ -38,18 +38,6 @@ rm -f "$DEB_FILE"
echo "$LATEST_VERSION" >~/.emqx
msg_ok "Installed EMQX"
read -r -p "${TAB3}Would you like to disable the EMQX MQ feature? (reduces disk/CPU usage) <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Disabling EMQX MQ feature"
mkdir -p /etc/emqx
if ! grep -q "^mq.enable" /etc/emqx/emqx.conf 2>/dev/null; then
echo "mq.enable = false" >>/etc/emqx/emqx.conf
else
sed -i 's/^mq.enable.*/mq.enable = false/' /etc/emqx/emqx.conf
fi
msg_ok "Disabled EMQX MQ feature"
fi
msg_info "Starting EMQX service"
$STD systemctl enable -q --now emqx
msg_ok "Enabled EMQX service"

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Authors: MickLesk (CanbiZ)
# Co-Authors: remz1337
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# Co-Author: remz1337
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/
@@ -14,221 +14,60 @@ setting_up_container
network_check
update_os
source /etc/os-release
if [[ "$VERSION_ID" != "12" ]]; then
msg_error "Frigate requires Debian 12 (Bookworm) due to Python 3.11 dependencies"
exit 1
fi
msg_info "Converting APT sources to DEB822 format"
if [ -f /etc/apt/sources.list ]; then
cat > /etc/apt/sources.list.d/debian.sources <<'EOF'
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm
Components: main contrib
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm-updates
Components: main contrib
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb
URIs: http://security.debian.org
Suites: bookworm-security
Components: main contrib
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
EOF
mv /etc/apt/sources.list /etc/apt/sources.list.bak
$STD apt update
fi
msg_ok "Converted APT sources"
msg_info "Installing Dependencies"
$STD apt install -y \
xz-utils \
python3 \
python3-dev \
python3-pip \
gcc \
pkg-config \
libhdf5-dev \
build-essential \
automake \
libtool \
ccache \
libusb-1.0-0-dev \
apt-transport-https \
cmake \
git \
libgtk-3-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libv4l-dev \
libxvidcore-dev \
libx264-dev \
libjpeg-dev \
libpng-dev \
libtiff-dev \
gfortran \
openexr \
libssl-dev \
libtbbmalloc2 \
libtbb-dev \
libdc1394-dev \
libopenexr-dev \
libgstreamer-plugins-base1.0-dev \
libgstreamer1.0-dev \
tclsh \
libopenblas-dev \
liblapack-dev \
make \
moreutils
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y {git,ca-certificates,automake,build-essential,xz-utils,libtool,ccache,pkg-config,libgtk-3-dev,libavcodec-dev,libavformat-dev,libswscale-dev,libv4l-dev,libxvidcore-dev,libx264-dev,libjpeg-dev,libpng-dev,libtiff-dev,gfortran,openexr,libatlas-base-dev,libssl-dev,libtbb2,libtbb-dev,libdc1394-22-dev,libopenexr-dev,libgstreamer-plugins-base1.0-dev,libgstreamer1.0-dev,gcc,gfortran,libopenblas-dev,liblapack-dev,libusb-1.0-0-dev,jq,moreutils}
msg_ok "Installed Dependencies"
msg_info "Setup Python3"
$STD apt-get install -y {python3,python3-dev,python3-setuptools,python3-distutils,python3-pip}
$STD pip install --upgrade pip
msg_ok "Setup Python3"
NODE_VERSION="22" setup_nodejs
msg_info "Installing go2rtc"
mkdir -p /usr/local/go2rtc/bin
cd /usr/local/go2rtc/bin
curl -fsSL "https://github.com/AlexxIT/go2rtc/releases/latest/download/go2rtc_linux_amd64" -o "go2rtc"
chmod +x go2rtc
$STD ln -svf /usr/local/go2rtc/bin/go2rtc /usr/local/bin/go2rtc
msg_ok "Installed go2rtc"
setup_hwaccel
export TARGETARCH="amd64"
export CCACHE_DIR=/root/.ccache
export CCACHE_MAXSIZE=2G
export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
export PIP_BREAK_SYSTEM_PACKAGES=1
export NVIDIA_VISIBLE_DEVICES=all
export NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
export TOKENIZERS_PARALLELISM=true
export TRANSFORMERS_NO_ADVISORY_WARNINGS=1
export OPENCV_FFMPEG_LOGLEVEL=8
export HAILORT_LOGGER_PATH=NONE
fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.16.4" "/opt/frigate"
msg_info "Building Nginx"
$STD bash /opt/frigate/docker/main/build_nginx.sh
sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
msg_ok "Built Nginx"
msg_info "Building SQLite Extensions"
$STD bash /opt/frigate/docker/main/build_sqlite_vec.sh
msg_ok "Built SQLite Extensions"
fetch_and_deploy_gh_release "go2rtc" "AlexxIT/go2rtc" "singlefile" "latest" "/usr/local/go2rtc/bin" "go2rtc_linux_amd64"
msg_info "Installing Tempio"
sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
$STD bash /opt/frigate/docker/main/install_tempio.sh
ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
msg_ok "Installed Tempio"
msg_info "Building libUSB"
fetch_and_deploy_gh_release "libusb" "libusb/libusb" "tarball" "v1.0.26" "/opt/libusb"
cd /opt/libusb
$STD ./bootstrap.sh
$STD ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared
$STD make -j "$(nproc)"
cd /opt/libusb/libusb
mkdir -p /usr/local/lib /usr/local/include/libusb-1.0 /usr/local/lib/pkgconfig
$STD bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la /usr/local/lib
install -c -m 644 libusb.h /usr/local/include/libusb-1.0
cd /opt/libusb/
install -c -m 644 libusb-1.0.pc /usr/local/lib/pkgconfig
ldconfig
msg_ok "Built libUSB"
msg_info "Installing Python Dependencies"
$STD pip3 install -r /opt/frigate/docker/main/requirements.txt
msg_ok "Installed Python Dependencies"
msg_info "Building Python Wheels (Patience)"
mkdir -p /wheels
sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh
$STD bash /opt/frigate/docker/main/build_pysqlite3.sh
for i in {1..3}; do
$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break
[[ $i -lt 3 ]] && sleep 10
done
msg_ok "Built Python Wheels"
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
msg_info "Downloading Inference Models"
mkdir -p /models /openvino-model
wget -q -O /edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
wget -q -O /models/cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
cp /opt/frigate/labelmap.txt /labelmap.txt
msg_ok "Downloaded Inference Models"
msg_info "Downloading Audio Model"
wget -q -O /tmp/yamnet.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download
$STD tar xzf /tmp/yamnet.tar.gz -C /
mv /1.tflite /cpu_audio_model.tflite
cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
rm -f /tmp/yamnet.tar.gz
msg_ok "Downloaded Audio Model"
msg_info "Installing HailoRT Runtime"
$STD bash /opt/frigate/docker/main/install_hailort.sh
msg_info "Installing Frigate v0.14.1 (Perseverance)"
cd ~
mkdir -p /opt/frigate/models
curl -fsSL "https://github.com/blakeblackshear/frigate/archive/refs/tags/v0.14.1.tar.gz" -o "frigate.tar.gz"
tar -xzf frigate.tar.gz -C /opt/frigate --strip-components 1
rm -rf frigate.tar.gz
cd /opt/frigate
$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt
cp -a /opt/frigate/docker/main/rootfs/. /
sed -i '/^.*unset DEBIAN_FRONTEND.*$/d' /opt/frigate/docker/main/install_deps.sh
echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections
echo "libedgetpu1-max libedgetpu/install-confirm-max boolean true" | debconf-set-selections
$STD bash /opt/frigate/docker/main/install_deps.sh
export TARGETARCH="amd64"
echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections
$STD /opt/frigate/docker/main/install_deps.sh
$STD apt update
$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg
$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffprobe /usr/local/bin/ffprobe
$STD pip3 install -U /wheels/*.whl
ldconfig
msg_ok "Installed HailoRT Runtime"
msg_info "Installing OpenVino"
$STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt
msg_ok "Installed OpenVino"
msg_info "Building OpenVino Model"
cd /models
wget -q http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz
$STD tar -zxf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner
if python3 /opt/frigate/docker/main/build_ov_model.py 2>&1; then
cp /models/ssdlite_mobilenet_v2.xml /openvino-model/
cp /models/ssdlite_mobilenet_v2.bin /openvino-model/
wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt
sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt
msg_ok "Built OpenVino Model"
else
msg_warn "OpenVino build failed (CPU may not support required instructions). Frigate will use CPU model."
fi
msg_info "Building Frigate Application (Patience)"
cd /opt/frigate
$STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt
$STD bash /opt/frigate/.devcontainer/initialize.sh
$STD /opt/frigate/.devcontainer/initialize.sh
$STD make version
cd /opt/frigate/web
$STD npm install
$STD npm run build
cp -r /opt/frigate/web/dist/* /opt/frigate/web/
sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
msg_ok "Built Frigate Application"
msg_info "Configuring Frigate"
mkdir -p /config /media/frigate
cp -r /opt/frigate/config/. /config
curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4"
echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
cat <<EOF >/etc/frigate.env
DEFAULT_FFMPEG_VERSION="7.0"
INCLUDED_FFMPEG_VERSIONS="7.0:5.0"
EOF
sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
cat <<EOF >/config/config.yml
mqtt:
enabled: false
cameras:
test:
ffmpeg:
#hwaccel_args: preset-vaapi
inputs:
- path: /media/frigate/person-bicycle-car-detection.mp4
input_args: -re -stream_loop -1 -fflags +genpts
@@ -239,42 +78,96 @@ cameras:
height: 1080
width: 1920
fps: 5
auth:
enabled: false
detect:
enabled: false
EOF
ln -sf /config/config.yml /opt/frigate/config/config.yml
if [[ "$CTTYPE" == "0" ]]; then
sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group
else
sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group
fi
echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
msg_ok "Installed Frigate"
if grep -q -o -m1 -E 'avx[^ ]*|sse4_2' /proc/cpuinfo; then
if grep -q -o -m1 -E 'avx[^ ]*' /proc/cpuinfo; then
msg_ok "AVX Support Detected"
msg_info "Installing Openvino Object Detection Model (Resilience)"
$STD pip install -r /opt/frigate/docker/main/requirements-ov.txt
cd /opt/frigate/models
export ENABLE_ANALYTICS=NO
$STD /usr/local/bin/omz_downloader --name ssdlite_mobilenet_v2 --num_attempts 2
$STD /usr/local/bin/omz_converter --name ssdlite_mobilenet_v2 --precision FP16 --mo /usr/local/bin/mo
cd /
cp -r /opt/frigate/models/public/ssdlite_mobilenet_v2 openvino-model
curl -fsSL "https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt" -o "openvino-model/coco_91cl_bkgr.txt"
sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
cat <<EOF >>/config/config.yml
ffmpeg:
hwaccel_args: auto
detectors:
detector01:
ov:
type: openvino
device: CPU
model:
path: /openvino-model/FP16/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
EOF
msg_ok "Installed Openvino Object Detection Model"
else
cat <<EOF >>/config/config.yml
ffmpeg:
hwaccel_args: auto
model:
path: /cpu_model.tflite
EOF
fi
msg_ok "Configured Frigate"
msg_info "Installing Coral Object Detection Model (Patience)"
cd /opt/frigate
export CCACHE_DIR=/root/.ccache
export CCACHE_MAXSIZE=2G
curl -fsSL "https://github.com/libusb/libusb/archive/v1.0.26.zip" -o "v1.0.26.zip"
$STD unzip v1.0.26.zip
rm v1.0.26.zip
cd libusb-1.0.26
$STD ./bootstrap.sh
$STD ./configure --disable-udev --enable-shared
$STD make -j $(nproc --all)
cd /opt/frigate/libusb-1.0.26/libusb
mkdir -p /usr/local/lib
$STD /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib'
mkdir -p /usr/local/include/libusb-1.0
$STD /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0'
ldconfig
cd /
curl -fsSL "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite" -o "edgetpu_model.tflite"
curl -fsSL "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite" -o "cpu_model.tflite"
cp /opt/frigate/labelmap.txt /labelmap.txt
curl -fsSL "https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download" -o "yamnet-tflite-classification-tflite-v1.tar.gz"
tar xzf yamnet-tflite-classification-tflite-v1.tar.gz
rm -rf yamnet-tflite-classification-tflite-v1.tar.gz
mv 1.tflite cpu_audio_model.tflite
cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
mkdir -p /media/frigate
curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4"
msg_ok "Installed Coral Object Detection Model"
msg_info "Building Nginx with Custom Modules"
$STD /opt/frigate/docker/main/build_nginx.sh
sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
msg_ok "Built Nginx"
msg_info "Installing Tempio"
sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
$STD /opt/frigate/docker/main/install_tempio.sh
ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
msg_ok "Installed Tempio"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/create_directories.service
[Unit]
Description=Create necessary directories for Frigate logs
Before=frigate.service go2rtc.service nginx.service
Description=Create necessary directories for logs
[Service]
Type=oneshot
@@ -283,11 +176,13 @@ ExecStart=/bin/bash -c '/bin/mkdir -p /dev/shm/logs/{frigate,go2rtc,nginx} && /b
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now create_directories
sleep 3
cat <<EOF >/etc/systemd/system/go2rtc.service
[Unit]
Description=go2rtc streaming service
After=network.target create_directories.service
Description=go2rtc service
After=network.target
After=create_directories.service
StartLimitIntervalSec=0
[Service]
@@ -295,8 +190,7 @@ Type=simple
Restart=always
RestartSec=1
User=root
EnvironmentFile=/etc/frigate.env
ExecStartPre=+rm -f /dev/shm/logs/go2rtc/current
ExecStartPre=+rm /dev/shm/logs/go2rtc/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/go2rtc/current
StandardError=file:/dev/shm/logs/go2rtc/current
@@ -304,11 +198,13 @@ StandardError=file:/dev/shm/logs/go2rtc/current
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now go2rtc
sleep 3
cat <<EOF >/etc/systemd/system/frigate.service
[Unit]
Description=Frigate NVR service
After=go2rtc.service create_directories.service
Description=Frigate service
After=go2rtc.service
After=create_directories.service
StartLimitIntervalSec=0
[Service]
@@ -316,8 +212,8 @@ Type=simple
Restart=always
RestartSec=1
User=root
EnvironmentFile=/etc/frigate.env
ExecStartPre=+rm -f /dev/shm/logs/frigate/current
# Environment=PLUS_API_KEY=
ExecStartPre=+rm /dev/shm/logs/frigate/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/frigate/current
StandardError=file:/dev/shm/logs/frigate/current
@@ -325,11 +221,13 @@ StandardError=file:/dev/shm/logs/frigate/current
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now frigate
sleep 3
cat <<EOF >/etc/systemd/system/nginx.service
[Unit]
Description=Nginx reverse proxy for Frigate
After=frigate.service create_directories.service
Description=Nginx service
After=frigate.service
After=create_directories.service
StartLimitIntervalSec=0
[Service]
@@ -337,7 +235,7 @@ Type=simple
Restart=always
RestartSec=1
User=root
ExecStartPre=+rm -f /dev/shm/logs/nginx/current
ExecStartPre=+rm /dev/shm/logs/nginx/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/nginx/current
StandardError=file:/dev/shm/logs/nginx/current
@@ -345,20 +243,8 @@ StandardError=file:/dev/shm/logs/nginx/current
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable -q --now create_directories
sleep 2
systemctl enable -q --now go2rtc
sleep 2
systemctl enable -q --now frigate
sleep 2
systemctl enable -q --now nginx
msg_ok "Created Services"
msg_info "Cleaning Up"
rm -rf /opt/libusb /wheels /models/*.tar.gz
msg_ok "Cleaned Up"
msg_ok "Configured Services"
motd_ssh
customize

View File

@@ -24,7 +24,7 @@ setup_hwaccel
PYTHON_VERSION="3.12" setup_uv
msg_info "Installing Open WebUI"
$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
$STD uv tool install --python 3.12 open-webui[all]
msg_ok "Installed Open WebUI"
read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt

View File

@@ -36,7 +36,7 @@ $STD npm ci
$STD npm run set:sqlite
$STD npm run set:oss
rm -rf server/private
$STD npm run db:generate
$STD npm run db:sqlite:generate
$STD npm run build
$STD npm run build:cli
cp -R .next/standalone ./
@@ -178,7 +178,7 @@ http:
servers:
- url: "http://$LOCAL_IP:3000"
EOF
$STD npm run db:push
$STD npm run db:sqlite:push
. /etc/os-release
if [ "$VERSION_CODENAME" = "trixie" ]; then

View File

@@ -14,51 +14,42 @@ network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y apache2-utils
$STD apt install -y \
apache2-utils \
python3-pip \
python3-venv
msg_ok "Installed Dependencies"
PYTHON_VERSION="3.13" setup_uv
fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
msg_info "Setting up Radicale"
cd /opt/radicale
python3 -m venv /opt/radicale
source /opt/radicale/bin/activate
$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
$STD htpasswd -c -b -5 /opt/radicale/users admin "$RNDPASS"
$STD htpasswd -c -b -5 /opt/radicale/users admin $RNDPASS
{
echo "Radicale Credentials"
echo "Admin User: admin"
echo "Admin Password: $RNDPASS"
} >>~/radicale.creds
msg_ok "Done setting up Radicale"
mkdir -p /etc/radicale
cat <<EOF >/etc/radicale/config
[server]
hosts = 0.0.0.0:5232
msg_info "Setup Service"
[auth]
type = htpasswd
htpasswd_filename = /opt/radicale/users
htpasswd_encryption = sha512
[storage]
type = multifilesystem
filesystem_folder = /var/lib/radicale/collections
[web]
type = internal
cat <<EOF >/opt/radicale/start.sh
#!/usr/bin/env bash
source /opt/radicale/bin/activate
python3 -m radicale --storage-filesystem-folder=/var/lib/radicale/collections --hosts 0.0.0.0:5232 --auth-type htpasswd --auth-htpasswd-filename /opt/radicale/users --auth-htpasswd-encryption sha512
EOF
msg_ok "Set up Radicale"
msg_info "Creating Service"
chmod +x /opt/radicale/start.sh
cat <<EOF >/etc/systemd/system/radicale.service
[Unit]
Description=A simple CalDAV (calendar) and CardDAV (contact) server
After=network.target
Requires=network.target
[Service]
WorkingDirectory=/opt/radicale
ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config
ExecStart=/opt/radicale/start.sh
Restart=on-failure
# User=radicale
# Deny other users access to the calendar data

View File

@@ -15,18 +15,16 @@ update_os
msg_info "Installing Dependencies"
$STD apt install -y apt-transport-https
curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
Types: deb
URIs: https://www.ui.com/downloads/unifi/debian
Suites: stable
Components: ubiquiti
Architectures: amd64
Signed-By: /usr/share/keyrings/unifi-repo.gpg
EOF
$STD apt update
msg_ok "Installed Dependencies"
setup_deb822_repo \
"unifi" \
"https://dl.ui.com/unifi/unifi-repo.gpg" \
"https://www.ui.com/downloads/unifi/debian" \
"stable" \
"ubiquiti" \
"amd64"
JAVA_VERSION="21" setup_java
if lscpu | grep -q 'avx'; then

View File

@@ -153,7 +153,7 @@ explain_exit_code() {
126) echo "Command invoked cannot execute (permission problem?)" ;;
127) echo "Command not found" ;;
128) echo "Invalid argument to exit" ;;
130) echo "Aborted by user (SIGINT)" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
137) echo "Killed (SIGKILL / Out of memory?)" ;;
139) echo "Segmentation fault (core dumped)" ;;
@@ -233,43 +233,6 @@ explain_exit_code() {
esac
}
# ------------------------------------------------------------------------------
# json_escape()
#
# - Escapes a string for safe JSON embedding
# - Handles backslashes, quotes, newlines, tabs, and carriage returns
# ------------------------------------------------------------------------------
json_escape() {
local s="$1"
s=${s//\\/\\\\}
s=${s//\"/\\\"}
s=${s//$'\n'/\\n}
s=${s//$'\r'/}
s=${s//$'\t'/\\t}
echo "$s"
}
# ------------------------------------------------------------------------------
# get_error_text()
#
# - Returns last 20 lines of the active log (INSTALL_LOG or BUILD_LOG)
# - Falls back to empty string if no log is available
# ------------------------------------------------------------------------------
get_error_text() {
local logfile=""
if declare -f get_active_logfile >/dev/null 2>&1; then
logfile=$(get_active_logfile)
elif [[ -n "${INSTALL_LOG:-}" ]]; then
logfile="$INSTALL_LOG"
elif [[ -n "${BUILD_LOG:-}" ]]; then
logfile="$BUILD_LOG"
fi
if [[ -n "$logfile" && -s "$logfile" ]]; then
tail -n 20 "$logfile" 2>/dev/null | sed 's/\r$//'
fi
}
# ==============================================================================
# SECTION 2: TELEMETRY FUNCTIONS
# ==============================================================================
@@ -390,9 +353,6 @@ detect_ram() {
# - Never blocks or fails script execution
# ------------------------------------------------------------------------------
post_to_api() {
# Prevent duplicate submissions (post_to_api is called from multiple places)
[[ "${POST_TO_API_DONE:-}" == "true" ]] && return 0
# Silent fail - telemetry should never break scripts
command -v curl &>/dev/null || {
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] curl not found, skipping" >&2
@@ -480,8 +440,6 @@ EOF
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" &>/dev/null || true
fi
POST_TO_API_DONE=true
}
# ------------------------------------------------------------------------------
@@ -493,7 +451,6 @@ EOF
# * ct_type=2 (VM instead of LXC)
# * type="vm"
# * Disk size without 'G' suffix
# - Includes hardware detection: CPU, GPU, RAM speed
# - Only executes if DIAGNOSTICS=yes and RANDOM_UUID is set
# - Never blocks or fails script execution
# ------------------------------------------------------------------------------
@@ -516,27 +473,6 @@ post_to_api_vm() {
pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
fi
# Detect GPU if not already set
if [[ -z "${GPU_VENDOR:-}" ]]; then
detect_gpu
fi
local gpu_vendor="${GPU_VENDOR:-unknown}"
local gpu_model="${GPU_MODEL:-}"
local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
# Detect CPU if not already set
if [[ -z "${CPU_VENDOR:-}" ]]; then
detect_cpu
fi
local cpu_vendor="${CPU_VENDOR:-unknown}"
local cpu_model="${CPU_MODEL:-}"
# Detect RAM if not already set
if [[ -z "${RAM_SPEED:-}" ]]; then
detect_ram
fi
local ram_speed="${RAM_SPEED:-}"
# Remove 'G' suffix from disk size
local DISK_SIZE_API="${DISK_SIZE%G}"
@@ -556,12 +492,6 @@ post_to_api_vm() {
"os_version": "${var_version:-}",
"pve_version": "${pve_version}",
"method": "${METHOD:-default}",
"cpu_vendor": "${cpu_vendor}",
"cpu_model": "${cpu_model}",
"gpu_vendor": "${gpu_vendor}",
"gpu_model": "${gpu_model}",
"gpu_passthrough": "${gpu_passthrough}",
"ram_speed": "${ram_speed}",
"repo_source": "${REPO_SOURCE}"
}
EOF
@@ -638,13 +568,7 @@ post_update_to_api() {
else
exit_code=1
fi
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error=$(explain_exit_code "$exit_code")
error_category=$(categorize_error "$exit_code")
[[ -z "$error" ]] && error="Unknown error"
fi
@@ -734,9 +658,6 @@ categorize_error() {
# Configuration errors
203 | 204 | 205 | 206 | 207 | 208) echo "config" ;;
# Aborted by user
130) echo "aborted" ;;
# Resource errors (OOM, etc)
137 | 134) echo "resource" ;;
@@ -801,13 +722,7 @@ post_tool_to_api() {
if [[ "$status" == "failed" ]]; then
[[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error=$(explain_exit_code "$exit_code")
error_category=$(categorize_error "$exit_code")
fi
@@ -868,13 +783,7 @@ post_addon_to_api() {
if [[ "$status" == "failed" ]]; then
[[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error=$(explain_exit_code "$exit_code")
error_category=$(categorize_error "$exit_code")
fi
@@ -967,13 +876,7 @@ post_update_to_api_extended() {
else
exit_code=1
fi
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error=$(explain_exit_code "$exit_code")
error_category=$(categorize_error "$exit_code")
[[ -z "$error" ]] && error="Unknown error"
fi

View File

@@ -4046,10 +4046,12 @@ EOF'
if [[ $install_exit_code -ne 0 ]]; then
msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"
# Copy install log from container BEFORE API call so get_error_text() can read it
# Report failure to telemetry API
post_update_to_api "failed" "$install_exit_code"
# Copy both logs from container before potential deletion
local build_log_copied=false
local install_log_copied=false
local host_install_log="/tmp/install-lxc-${CTID}-${SESSION_ID}.log"
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
# Copy BUILD_LOG (creation log) if it exists
@@ -4057,22 +4059,15 @@ EOF'
cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
fi
# Copy INSTALL_LOG from container to host
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$host_install_log" 2>/dev/null; then
# Copy INSTALL_LOG from container
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
install_log_copied=true
# Point INSTALL_LOG to host copy so get_error_text() finds it
INSTALL_LOG="$host_install_log"
fi
fi
# Report failure to telemetry API (now with log available on host)
post_update_to_api "failed" "$install_exit_code"
# Show available logs
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
# Show available logs
echo ""
[[ "$build_log_copied" == true ]] && echo -e "${GN}${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
[[ "$install_log_copied" == true ]] && echo -e "${GN}${CL} Installation log: ${BL}${host_install_log}${CL}"
[[ "$install_log_copied" == true ]] && echo -e "${GN}${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
fi
# Dev mode: Keep container or open breakpoint shell

View File

@@ -243,18 +243,6 @@ error_handler() {
# ------------------------------------------------------------------------------
on_exit() {
local exit_code=$?
# Report orphaned "installing" records to telemetry API
# Catches ALL exit paths: errors (non-zero), signals, AND clean exits where
# post_to_api was called ("installing" sent) but post_update_to_api was never called
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
if declare -f post_update_to_api >/dev/null 2>&1; then
if [[ $exit_code -ne 0 ]]; then
post_update_to_api "failed" "$exit_code"
else
post_update_to_api "failed" "1"
fi
fi
fi
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code"
}
@@ -267,10 +255,6 @@ on_exit() {
# - Exits with code 130 (128 + SIGINT=2)
# ------------------------------------------------------------------------------
on_interrupt() {
# Report interruption to telemetry API (prevents stuck "installing" records)
if declare -f post_update_to_api >/dev/null 2>&1; then
post_update_to_api "failed" "130"
fi
if declare -f msg_error >/dev/null 2>&1; then
msg_error "Interrupted by user (SIGINT)"
else
@@ -288,10 +272,6 @@ on_interrupt() {
# - Triggered by external process termination
# ------------------------------------------------------------------------------
on_terminate() {
# Report termination to telemetry API (prevents stuck "installing" records)
if declare -f post_update_to_api >/dev/null 2>&1; then
post_update_to_api "failed" "143"
fi
if declare -f msg_error >/dev/null 2>&1; then
msg_error "Terminated by signal (SIGTERM)"
else

View File

@@ -465,7 +465,6 @@ manage_tool_repository() {
msg_error "Failed to download MongoDB GPG key"
return 1
fi
chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"
# Setup repository
local distro_codename
@@ -1295,33 +1294,12 @@ setup_deb822_repo() {
return 1
}
# Import GPG key (auto-detect binary vs ASCII-armored format)
local tmp_gpg
tmp_gpg=$(mktemp) || return 1
curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
msg_error "Failed to download GPG key for ${name}"
rm -f "$tmp_gpg"
# Import GPG
curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
msg_error "Failed to import GPG key for ${name}"
return 1
}
if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
# ASCII-armored — dearmor to binary
gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" < "$tmp_gpg" || {
msg_error "Failed to dearmor GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
else
# Already in binary GPG format — copy directly
cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
msg_error "Failed to install GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
fi
rm -f "$tmp_gpg"
chmod 644 "/etc/apt/keyrings/${name}.gpg"
# Write deb822
{
echo "Types: deb"

View File

@@ -75,37 +75,14 @@ pct exec "$CTID" -- bash -c '
set -e
export DEBIAN_FRONTEND=noninteractive
# Source os-release properly (handles quoted values)
source /etc/os-release
ID=$(grep "^ID=" /etc/os-release | cut -d"=" -f2)
VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2)
# Fallback if DNS is poisoned or blocked
# fallback if DNS is poisoned or blocked
ORIG_RESOLV="/etc/resolv.conf"
BACKUP_RESOLV="/tmp/resolv.conf.backup"
# Check DNS resolution using multiple methods (dig may not be installed)
dns_check_failed=true
if command -v dig &>/dev/null; then
if dig +short pkgs.tailscale.com 2>/dev/null | grep -qvE "^127\.|^0\.0\.0\.0$|^$"; then
dns_check_failed=false
fi
elif command -v host &>/dev/null; then
if host pkgs.tailscale.com 2>/dev/null | grep -q "has address"; then
dns_check_failed=false
fi
elif command -v nslookup &>/dev/null; then
if nslookup pkgs.tailscale.com 2>/dev/null | grep -q "Address:"; then
dns_check_failed=false
fi
elif command -v getent &>/dev/null; then
if getent hosts pkgs.tailscale.com &>/dev/null; then
dns_check_failed=false
fi
else
# No DNS tools available, try curl directly and assume DNS works
dns_check_failed=false
fi
if $dns_check_failed; then
if ! dig +short pkgs.tailscale.com | grep -qvE "^127\.|^0\.0\.0\.0$"; then
echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)."
echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)"
cp "$ORIG_RESOLV" "$BACKUP_RESOLV"
@@ -115,22 +92,17 @@ fi
if ! command -v curl &>/dev/null; then
echo "[INFO] curl not found, installing..."
apt-get update -qq
apt update -qq
apt install -y curl >/dev/null
apt-get install -y curl >/dev/null
fi
# Ensure keyrings directory exists
mkdir -p /usr/share/keyrings
curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VERSION_CODENAME}.noarmor.gpg" \
curl -fsSL https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg \
| tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VERSION_CODENAME} main" \
echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
>/etc/apt/sources.list.d/tailscale.list
apt-get update -qq
apt update -qq
apt install -y tailscale >/dev/null
apt-get install -y tailscale >/dev/null
if [[ -f /tmp/resolv.conf.backup ]]; then
echo "[INFO] Restoring original /etc/resolv.conf"

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -203,6 +203,7 @@ function exit-script() {
function default_settings() {
VMID=$(get_valid_nextid)
FORMAT=",efitype=4m"
MACHINE=""
DISK_SIZE="4G"
DISK_CACHE=""
@@ -258,9 +259,11 @@ function advanced_settings() {
3>&1 1>&2 2>&3); then
if [ "$MACH" = q35 ]; then
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
FORMAT=""
MACHINE=" -machine q35"
else
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
FORMAT=",efitype=4m"
MACHINE=""
fi
else
@@ -473,45 +476,31 @@ case $STORAGE_TYPE in
nfs | dir | cifs)
DISK_EXT=".qcow2"
DISK_REF="$VMID/"
DISK_IMPORT="--format qcow2"
DISK_IMPORT="-format qcow2"
THIN=""
;;
btrfs)
DISK_EXT=".raw"
DISK_REF="$VMID/"
DISK_IMPORT="--format raw"
DISK_IMPORT="-format raw"
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="--format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"
eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
done
msg_info "Creating a Arch Linux VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
if qm disk import --help >/dev/null 2>&1; then
IMPORT_CMD=(qm disk import)
else
IMPORT_CMD=(qm importdisk)
fi
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
[[ -z "$DISK_REF_IMPORTED" ]] && {
msg_error "Unable to determine imported disk reference."
echo "$IMPORT_OUT"
exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
qm set $VMID \
-efidisk0 ${STORAGE}:0,efitype=4m \
-scsi0 ${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,} \
-efidisk0 ${DISK0_REF}${FORMAT} \
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
-ide2 ${STORAGE}:cloudinit \
-boot order=scsi0 \
-serial0 socket >/dev/null

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -201,17 +201,6 @@ function exit-script() {
exit
}
function select_cloud_init() {
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \
--yesno "Enable Cloud-Init for VM configuration?\n\nCloud-Init allows automatic configuration of:\n- User accounts and passwords\n- SSH keys\n- Network settings (DHCP/Static)\n- DNS configuration\n\nYou can also configure these settings later in Proxmox UI.\n\nNote: Without Cloud-Init, the nocloud image will be used with console auto-login." --defaultno 18 68); then
CLOUD_INIT="yes"
echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}yes${CL}"
else
CLOUD_INIT="no"
echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}no${CL}"
fi
}
function default_settings() {
VMID=$(get_valid_nextid)
FORMAT=",efitype=4m"
@@ -227,6 +216,7 @@ function default_settings() {
VLAN=""
MTU=""
START_VM="yes"
CLOUD_INIT="no"
METHOD="default"
echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
@@ -240,7 +230,7 @@ function default_settings() {
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
select_cloud_init
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
echo -e "${CREATING}${BOLD}${DGN}Creating a Debian 13 VM using the above default settings${CL}"
}
@@ -410,7 +400,13 @@ function advanced_settings() {
exit-script
fi
select_cloud_init
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" --yesno "Configure the VM with Cloud-init?" --defaultno 10 58); then
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}yes${CL}"
CLOUD_INIT="yes"
else
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
CLOUD_INIT="no"
fi
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
@@ -477,17 +473,6 @@ else
fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
# ==============================================================================
# PREREQUISITES
# ==============================================================================
if ! command -v virt-customize &>/dev/null; then
msg_info "Installing libguestfs-tools"
apt-get update >/dev/null 2>&1
apt-get install -y libguestfs-tools >/dev/null 2>&1
msg_ok "Installed libguestfs-tools"
fi
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
if [ "$CLOUD_INIT" == "yes" ]; then
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
@@ -501,50 +486,6 @@ echo -en "\e[1A\e[0K"
FILE=$(basename $URL)
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
# ==============================================================================
# IMAGE CUSTOMIZATION
# ==============================================================================
msg_info "Customizing ${FILE} image"
WORK_FILE=$(mktemp --suffix=.qcow2)
cp "$FILE" "$WORK_FILE"
# Set hostname
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
# Prepare for unique machine-id on first boot
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
# Disable systemd-firstboot to prevent interactive prompts blocking the console
virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true
# Pre-seed firstboot settings so it won't prompt even if triggered
virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true
if [ "$CLOUD_INIT" == "yes" ]; then
# Cloud-Init handles SSH and login
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
else
# Configure auto-login on serial console (ttyS0) and virtual console (tty1)
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
fi
msg_ok "Customized image"
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
nfs | dir)
@@ -560,11 +501,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
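
The `*)` arm touched by this hunk (and by the matching hunks in the other VM scripts below) is a catch-all for storage types not matched by a named arm: it leaves the extension and volume reference empty and imports as raw. Without it, those three variables stay unset for unknown types, which is why later uses expand them defensively, e.g. `${DISK_IMPORT:-}` in the import call below and `${DISK_EXT:-}` in the disk-name variables. A condensed sketch stitched together from the arms visible in this compare; values not shown in the hunks are assumptions:

```bash
# Condensed sketch of the storage-type dispatch; only arms visible in this compare are
# shown, and DISK_EXT for nfs/dir is assumed (it does not appear in the hunks).
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
  nfs | dir)
    DISK_EXT=".qcow2"
    DISK_REF="$VMID/"
    DISK_IMPORT="-format qcow2"
    ;;
  zfspool)
    DISK_EXT=""
    DISK_REF=""
    DISK_IMPORT="-format raw"
    ;;
  *)   # the default arm added/removed by this compare
    DISK_EXT=""
    DISK_REF=""
    DISK_IMPORT="-format raw"
    ;;
esac
```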
for i in {0,1}; do
disk="DISK$i"
@@ -576,7 +512,7 @@ msg_info "Creating a Debian 13 VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
if [ "$CLOUD_INIT" == "yes" ]; then
qm set $VMID \
-efidisk0 ${DISK0_REF}${FORMAT} \
@@ -591,10 +527,6 @@ else
-boot order=scsi0 \
-serial0 socket >/dev/null
fi
# Clean up work file
rm -f "$WORK_FILE"
DESCRIPTION=$(
cat <<EOF
<div align='center'>

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -501,11 +501,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -45,7 +45,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}

View File

@@ -74,7 +74,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}

View File

@@ -71,7 +71,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -566,11 +566,6 @@ zfspool)
DISK_REF=""
DISK_IMPORT="-format raw"
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}"

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -487,11 +487,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1,2}; do
disk="DISK$i"

View File

@@ -74,7 +74,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid

View File

@@ -48,7 +48,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -619,11 +619,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -71,7 +71,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -500,11 +500,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1,2}; do
disk="DISK$i"

View File

@@ -79,7 +79,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -402,11 +402,6 @@ nfs | dir)
DISK_REF="$VMID/"
DISK_IMPORT="-format qcow2"
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -66,7 +66,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -482,11 +482,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -69,7 +69,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -484,11 +484,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -68,7 +68,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -483,11 +483,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -69,7 +69,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}