Mirror of https://github.com/community-scripts/ProxmoxVE.git (synced 2026-02-14 17:23:25 +01:00)

Compare commits: 54 commits (fix/teleme ... automated/)
Commit SHA1s:
fd003ed737, 6b3653627c, 733ad75dc1, 60aaaab3e7, 1f735cb31f, adcbe8dae2, bba520dbbf, b43963d352, 0957a23366,
9f3588dd8d, f23414a1a8, 2a8bb76dcf, bf85ef2a8b, cc89cdbab1, d6f3f03f8a, 55e35d7f11, 3b9f8d4a93, 6c5377adec,
eeb349346b, d271c16799, 4774c54861, 4bf63bae35, f2b7c9638d, 551f89e46f, 4f571a1eb6, 3156e8e363, 60ebdc97a5,
20ec369338, 4907a906c3, 27e3a4301e, 43fb75f2b4, 899d0e4baa, 85584b105d, 3fe6f50414, 724a066aed, cd6e8ecbbe,
8083c0c0e1, 29836f35ed, 17d3d4297c, 2b921736e6, ddabe81dd8, 19c5671d3f, 2326520d17, 7964d39e32, f7cf7c8adc,
744191cb84, 291ed4c5ad, f9612c5aba, 403a839ac0, 41c89413ef, fa11528a7b, 2a03c86384, 57b4e10b93, 4b22c7cc2d
CHANGELOG.md (56 changed lines)

@@ -401,26 +401,80 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
 </details>
 
+## 2026-02-14
+
+### ❔ Uncategorized
+
+- Disable UniFi script - APT packages no longer available [@Copilot](https://github.com/Copilot) ([#11898](https://github.com/community-scripts/ProxmoxVE/pull/11898))
+
+## 2026-02-13
+
+### 🚀 Updated Scripts
+
+- #### 🐞 Bug Fixes
+
+- OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874))
+- Planka: add migrate step to update function [@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877))
+- Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868))
+- [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864))
+
+- #### 🔧 Refactor
+
+- Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850))
+- chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872))
+
+### 💾 Core
+
+- #### 🔧 Refactor
+
+- core: retry reporting with fallback payloads [@MickLesk](https://github.com/MickLesk) ([#11885](https://github.com/community-scripts/ProxmoxVE/pull/11885))
+
+### 📡 API
+
+- #### ✨ New Features
+
+- error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875))
+
+### 🌐 Website
+
+- #### 📝 Script Information
+
+- SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829))
+
 ## 2026-02-12
 
 ### 🚀 Updated Scripts
 
 - #### 🐞 Bug Fixes
 
+- EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844))
+- Increased the Grafana container default disk size. [@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840))
+- Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
 - Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833))
 - Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831))
-- Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
 
 - #### ✨ New Features
 
+- Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842))
 - Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810))
 
 ### 💾 Core
 
 - #### ✨ New Features
 
+- tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841))
 - core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822))
 
+- #### 🔧 Refactor
+
+- error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845))
+
+### 🧰 Tools
+
+- #### 🐞 Bug Fixes
+
+- Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837))
+
 ## 2026-02-11
 
 ### 🆕 New Scripts
@@ -9,7 +9,7 @@ APP="Alpine-Grafana"
|
|||||||
var_tags="${var_tags:-alpine;monitoring}"
|
var_tags="${var_tags:-alpine;monitoring}"
|
||||||
var_cpu="${var_cpu:-1}"
|
var_cpu="${var_cpu:-1}"
|
||||||
var_ram="${var_ram:-256}"
|
var_ram="${var_ram:-256}"
|
||||||
var_disk="${var_disk:-1}"
|
var_disk="${var_disk:-2}"
|
||||||
var_os="${var_os:-alpine}"
|
var_os="${var_os:-alpine}"
|
||||||
var_version="${var_version:-3.23}"
|
var_version="${var_version:-3.23}"
|
||||||
var_unprivileged="${var_unprivileged:-1}"
|
var_unprivileged="${var_unprivileged:-1}"
|
||||||
|
|||||||
@@ -42,7 +42,8 @@ function update_script() {
 msg_info "Restoring Configurations"
 mv /opt/selfhosted.yaml /opt/donetick/config
-sed -i '/capacitor:\/\/localhost/d' /opt/donetick/config/selfhosted.yaml
+grep -q 'http://localhost"$' /opt/donetick/config/selfhosted.yaml || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' /opt/donetick/config/selfhosted.yaml
+grep -q 'capacitor://localhost' /opt/donetick/config/selfhosted.yaml || sed -i '/http:\/\/localhost"$/a\ - "capacitor://localhost"' /opt/donetick/config/selfhosted.yaml
 mv /opt/donetick.db /opt/donetick
 msg_ok "Restored Configurations"
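The guard-then-append lines above are what make the config restore idempotent: an origin is only appended to selfhosted.yaml if it is not already listed, so re-running the update cannot duplicate entries. A minimal, self-contained sketch of that pattern follows; the file path and origin values are illustrative, not taken from the repository:

```bash
#!/usr/bin/env bash
# Sketch of the idempotent "grep guard + sed append" pattern used above.
# /tmp/cors.yaml and its entries are made up for illustration.
cfg=/tmp/cors.yaml
cat >"$cfg" <<'EOF'
allowed_origins:
  - "https://localhost"
EOF

# Append the new origin only if it is not already listed; running this twice
# leaves the file unchanged the second time.
grep -q 'http://localhost"$' "$cfg" \
  || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' "$cfg"

cat "$cfg"
```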
@@ -9,7 +9,7 @@ APP="EMQX"
|
|||||||
var_tags="${var_tags:-mqtt}"
|
var_tags="${var_tags:-mqtt}"
|
||||||
var_cpu="${var_cpu:-2}"
|
var_cpu="${var_cpu:-2}"
|
||||||
var_ram="${var_ram:-1024}"
|
var_ram="${var_ram:-1024}"
|
||||||
var_disk="${var_disk:-4}"
|
var_disk="${var_disk:-6}"
|
||||||
var_os="${var_os:-debian}"
|
var_os="${var_os:-debian}"
|
||||||
var_version="${var_version:-13}"
|
var_version="${var_version:-13}"
|
||||||
var_unprivileged="${var_unprivileged:-1}"
|
var_unprivileged="${var_unprivileged:-1}"
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ APP="Grafana"
|
|||||||
var_tags="${var_tags:-monitoring;visualization}"
|
var_tags="${var_tags:-monitoring;visualization}"
|
||||||
var_cpu="${var_cpu:-1}"
|
var_cpu="${var_cpu:-1}"
|
||||||
var_ram="${var_ram:-512}"
|
var_ram="${var_ram:-512}"
|
||||||
var_disk="${var_disk:-2}"
|
var_disk="${var_disk:-4}"
|
||||||
var_os="${var_os:-debian}"
|
var_os="${var_os:-debian}"
|
||||||
var_version="${var_version:-13}"
|
var_version="${var_version:-13}"
|
||||||
var_unprivileged="${var_unprivileged:-1}"
|
var_unprivileged="${var_unprivileged:-1}"
|
||||||
|
|||||||
@@ -46,7 +46,7 @@ function update_script() {
 msg_info "Restoring configuration & data"
 mv /opt/app.env /opt/jotty/.env
 [[ -d /opt/data ]] && mv /opt/data /opt/jotty/data
-[[ -d /opt/jotty/config ]] && mv /opt/config/* /opt/jotty/config
+[[ -d /opt/jotty/config ]] && cp -a /opt/config/* /opt/jotty/config && rm -rf /opt/config
 msg_ok "Restored configuration & data"
 
 msg_info "Starting Service"
@@ -44,7 +44,7 @@ function update_script() {
 msg_info "Installing uv-based Open-WebUI"
 PYTHON_VERSION="3.12" setup_uv
-$STD uv tool install --python 3.12 open-webui[all]
+$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
 msg_ok "Installed uv-based Open-WebUI"
 
 msg_info "Restoring data"

@@ -126,7 +126,7 @@ EOF
 msg_info "Updating Open WebUI via uv"
 PYTHON_VERSION="3.12" setup_uv
-$STD uv tool upgrade --python 3.12 open-webui[all]
+$STD uv tool install --force --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
 systemctl restart open-webui
 msg_ok "Updated Open WebUI"
 msg_ok "Updated successfully!"
@@ -51,7 +51,7 @@ function update_script() {
 $STD npm run db:generate
 $STD npm run build
 $STD npm run build:cli
-$STD npm run db:sqlite:push
+$STD npm run db:push
 cp -R .next/standalone ./
 chmod +x ./dist/cli.mjs
 cp server/db/names.json ./dist/names.json
@@ -61,6 +61,12 @@ function update_script() {
 rm -rf "$BK"
 msg_ok "Restored data"
 
+msg_ok "Migrate Database"
+cd /opt/planka
+$STD npm run db:upgrade
+$STD npm run db:migrate
+msg_ok "Migrated Database"
+
 msg_info "Starting Service"
 systemctl start planka
 msg_ok "Started Service"
@@ -28,16 +28,55 @@ function update_script() {
 exit
 fi
 
-msg_info "Updating ${APP}"
-$STD python3 -m venv /opt/radicale
-source /opt/radicale/bin/activate
-$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
-msg_ok "Updated ${APP}"
-
-msg_info "Starting Service"
-systemctl enable -q --now radicale
-msg_ok "Started Service"
-msg_ok "Updated successfully!"
+if check_for_gh_release "Radicale" "Kozea/Radicale"; then
+msg_info "Stopping service"
+systemctl stop radicale
+msg_ok "Stopped service"
+
+msg_info "Backing up users file"
+cp /opt/radicale/users /opt/radicale_users_backup
+msg_ok "Backed up users file"
+
+PYTHON_VERSION="3.13" setup_uv
+CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
+
+msg_info "Restoring users file"
+rm -f /opt/radicale/users
+mv /opt/radicale_users_backup /opt/radicale/users
+msg_ok "Restored users file"
+
+if grep -q 'start.sh' /etc/systemd/system/radicale.service; then
+sed -i -e '/^Description/i[Unit]' \
+-e '\|^ExecStart|iWorkingDirectory=/opt/radicale' \
+-e 's|^ExecStart=.*|ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config|' /etc/systemd/system/radicale.service
+systemctl daemon-reload
+fi
+if [[ ! -f /etc/radicale/config ]]; then
+msg_info "Migrating to config file (/etc/radicale/config)"
+mkdir -p /etc/radicale
+cat <<EOF >/etc/radicale/config
+[server]
+hosts = 0.0.0.0:5232
+
+[auth]
+type = htpasswd
+htpasswd_filename = /opt/radicale/users
+htpasswd_encryption = sha512
+
+[storage]
+type = multifilesystem
+filesystem_folder = /var/lib/radicale/collections
+
+[web]
+type = internal
+EOF
+msg_ok "Migrated to config (/etc/radicale/config)"
+fi
+msg_info "Starting service"
+systemctl start radicale
+msg_ok "Started service"
+msg_ok "Updated Successfully!"
+fi
 exit
 }
@@ -21,7 +21,7 @@
 "resources": {
 "cpu": 2,
 "ram": 1024,
-"hdd": 4,
+"hdd": 6,
 "os": "debian",
 "version": "13"
 }
Version-tracking manifest: the "generated" timestamp moves from 2026-02-12T12:15:15Z to 2026-02-14T12:08:41Z, a new entry is added for radicale (repo Kozea/Radicale, version v3.6.0, pinned: false, date 2026-01-10T06:56:46Z, inserted immediately before rclone), and the following tracked versions are bumped (all entries remain pinned: false):

| Slug | Repo | Version (old → new) | Date (old → new) |
|---|---|---|---|
| autobrr | autobrr/autobrr | v1.72.1 → v1.73.0 | 2026-01-30T12:57:58Z → 2026-02-13T16:37:28Z |
| cleanuparr | Cleanuparr/Cleanuparr | v2.5.1 → v2.6.1 | 2026-01-11T00:46:17Z → 2026-02-13T10:00:19Z |
| cloudreve | cloudreve/cloudreve | 4.13.0 → 4.14.0 | 2026-02-05T12:53:24Z → 2026-02-14T06:05:06Z |
| domain-locker | Lissy93/domain-locker | v0.1.3 → v0.1.4 | 2026-02-11T10:03:32Z → 2026-02-14T07:41:29Z |
| donetick | donetick/donetick | v0.1.71 → v0.1.73 | 2026-02-11T06:01:13Z → 2026-02-12T23:42:30Z |
| firefly | firefly-iii/firefly-iii | v6.4.18 → v6.4.19 | 2026-02-08T07:28:00Z → 2026-02-14T11:55:40Z |
| ghostfolio | ghostfolio/ghostfolio | 2.237.0 → 2.238.0 | 2026-02-08T13:59:53Z → 2026-02-12T18:28:55Z |
| gotify | gotify/server | v2.8.0 → v2.9.0 | 2026-01-02T11:56:16Z → 2026-02-13T15:22:31Z |
| homarr | homarr-labs/homarr | v1.53.0 → v1.53.1 | 2026-02-06T19:42:58Z → 2026-02-13T19:47:11Z |
| huntarr | plexguide/Huntarr.io | 9.2.4 → 9.2.4.1 | 2026-02-12T08:31:23Z → 2026-02-12T22:17:47Z |
| invoiceninja | invoiceninja/invoiceninja | v5.12.57 → v5.12.59 | 2026-02-11T23:08:56Z → 2026-02-13T02:26:13Z |
| jackett | Jackett/Jackett | v0.24.1098 → v0.24.1109 | 2026-02-12T05:56:25Z → 2026-02-14T05:54:26Z |
| metube | alexta69/metube | 2026.02.08 → 2026.02.14 | 2026-02-08T17:01:37Z → 2026-02-14T07:49:11Z |
| miniflux | miniflux/v2 | 2.2.16 → 2.2.17 | 2026-01-07T03:26:27Z → 2026-02-13T20:30:17Z |
| pangolin | fosrl/pangolin | 1.15.3 → 1.15.4 | 2026-02-12T06:10:19Z → 2026-02-13T23:01:29Z |
| patchmon | PatchMon/PatchMon | v1.3.7 → v1.4.0 | 2025-12-25T11:08:14Z → 2026-02-13T10:39:03Z |
| pocketbase | pocketbase/pocketbase | v0.36.2 → v0.36.3 | 2026-02-01T08:12:42Z → 2026-02-13T18:38:58Z |
| scraparr | thecfu/scraparr | v3.0.1 → v3.0.3 | 2026-02-11T17:42:23Z → 2026-02-12T14:20:56Z |
| semaphore | semaphoreui/semaphore | v2.16.51 → v2.17.0 | 2026-01-12T16:26:38Z → 2026-02-13T21:08:30Z |
| tandoor | TandoorRecipes/recipes | 2.5.0 → 2.5.1 | 2026-02-08T13:23:02Z → 2026-02-13T15:57:27Z |
| termix | Termix-SSH/Termix | release-1.11.0-tag → release-1.11.1-tag | 2026-01-25T02:09:52Z → 2026-02-13T04:49:16Z |
| tianji | msgbyte/tianji | v1.31.10 → v1.31.13 | 2026-02-04T17:21:04Z → 2026-02-13T16:30:09Z |
| tududi | chrisvel/tududi | v0.88.4 → v0.88.5 | 2026-01-20T15:11:58Z → 2026-02-13T13:54:14Z |
| upsnap | seriousm4x/UpSnap | 5.2.7 → 5.2.8 | 2026-01-07T23:48:00Z → 2026-02-13T00:02:37Z |
| uptimekuma | louislam/uptime-kuma | 2.1.0 → 2.1.1 | 2026-02-07T02:31:49Z → 2026-02-13T16:07:33Z |
@@ -21,7 +21,7 @@
 "resources": {
 "cpu": 1,
 "ram": 512,
-"hdd": 2,
+"hdd": 4,
 "os": "debian",
 "version": "13"
 }

@@ -32,7 +32,7 @@
 "resources": {
 "cpu": 1,
 "ram": 256,
-"hdd": 1,
+"hdd": 2,
 "os": "alpine",
 "version": "3.23"
 }
@@ -12,7 +12,7 @@
 "documentation": "https://radicale.org/master.html#documentation-1",
 "website": "https://radicale.org/",
 "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp",
-"config_path": "/etc/radicale/config or ~/.config/radicale/config",
+"config_path": "/etc/radicale/config",
 "description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)",
 "install_methods": [
 {
@@ -32,6 +32,10 @@
 "password": null
 },
 "notes": [
+{
+"text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.",
+"type": "warning"
+},
 {
 "text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.",
 "type": "info"
@@ -14,6 +14,8 @@
 "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubiquiti-unifi.webp",
 "config_path": "",
 "description": "UniFi Network Server is a software that helps manage and monitor UniFi networks (Wi-Fi, Ethernet, etc.) by providing an intuitive user interface and advanced features. It allows network administrators to configure, monitor, and upgrade network devices, as well as view network statistics, client devices, and historical events. The aim of the application is to make the management of UniFi networks easier and more efficient.",
+"disable": true,
+"disable_description": "This script is disabled because UniFi no longer delivers APT packages for Debian systems. The installation relies on APT repositories that are no longer maintained or available. For more details, see: https://github.com/community-scripts/ProxmoxVE/issues/11876",
 "install_methods": [
 {
 "type": "default",
@@ -38,6 +38,18 @@ rm -f "$DEB_FILE"
|
|||||||
echo "$LATEST_VERSION" >~/.emqx
|
echo "$LATEST_VERSION" >~/.emqx
|
||||||
msg_ok "Installed EMQX"
|
msg_ok "Installed EMQX"
|
||||||
|
|
||||||
|
read -r -p "${TAB3}Would you like to disable the EMQX MQ feature? (reduces disk/CPU usage) <y/N> " prompt
|
||||||
|
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
|
||||||
|
msg_info "Disabling EMQX MQ feature"
|
||||||
|
mkdir -p /etc/emqx
|
||||||
|
if ! grep -q "^mq.enable" /etc/emqx/emqx.conf 2>/dev/null; then
|
||||||
|
echo "mq.enable = false" >>/etc/emqx/emqx.conf
|
||||||
|
else
|
||||||
|
sed -i 's/^mq.enable.*/mq.enable = false/' /etc/emqx/emqx.conf
|
||||||
|
fi
|
||||||
|
msg_ok "Disabled EMQX MQ feature"
|
||||||
|
fi
|
||||||
|
|
||||||
msg_info "Starting EMQX service"
|
msg_info "Starting EMQX service"
|
||||||
$STD systemctl enable -q --now emqx
|
$STD systemctl enable -q --now emqx
|
||||||
msg_ok "Enabled EMQX service"
|
msg_ok "Enabled EMQX service"
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ setup_hwaccel
 PYTHON_VERSION="3.12" setup_uv
 
 msg_info "Installing Open WebUI"
-$STD uv tool install --python 3.12 open-webui[all]
+$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
 msg_ok "Installed Open WebUI"
 
 read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt
@@ -36,7 +36,7 @@ $STD npm ci
 $STD npm run set:sqlite
 $STD npm run set:oss
 rm -rf server/private
-$STD npm run db:sqlite:generate
+$STD npm run db:generate
 $STD npm run build
 $STD npm run build:cli
 cp -R .next/standalone ./

@@ -178,7 +178,7 @@ http:
 servers:
 - url: "http://$LOCAL_IP:3000"
 EOF
-$STD npm run db:sqlite:push
+$STD npm run db:push
 
 . /etc/os-release
 if [ "$VERSION_CODENAME" = "trixie" ]; then
@@ -14,42 +14,51 @@ network_check
 update_os
 
 msg_info "Installing Dependencies"
-$STD apt install -y \
-apache2-utils \
-python3-pip \
-python3-venv
+$STD apt install -y apache2-utils
 msg_ok "Installed Dependencies"
 
+PYTHON_VERSION="3.13" setup_uv
+fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
+
 msg_info "Setting up Radicale"
-python3 -m venv /opt/radicale
-source /opt/radicale/bin/activate
-$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
+cd /opt/radicale
 RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
-$STD htpasswd -c -b -5 /opt/radicale/users admin $RNDPASS
+$STD htpasswd -c -b -5 /opt/radicale/users admin "$RNDPASS"
 {
 echo "Radicale Credentials"
 echo "Admin User: admin"
 echo "Admin Password: $RNDPASS"
 } >>~/radicale.creds
-msg_ok "Done setting up Radicale"
 
-msg_info "Setup Service"
-
-cat <<EOF >/opt/radicale/start.sh
-#!/usr/bin/env bash
-source /opt/radicale/bin/activate
-python3 -m radicale --storage-filesystem-folder=/var/lib/radicale/collections --hosts 0.0.0.0:5232 --auth-type htpasswd --auth-htpasswd-filename /opt/radicale/users --auth-htpasswd-encryption sha512
+mkdir -p /etc/radicale
+cat <<EOF >/etc/radicale/config
+[server]
+hosts = 0.0.0.0:5232
+
+[auth]
+type = htpasswd
+htpasswd_filename = /opt/radicale/users
+htpasswd_encryption = sha512
+
+[storage]
+type = multifilesystem
+filesystem_folder = /var/lib/radicale/collections
+
+[web]
+type = internal
 EOF
+msg_ok "Set up Radicale"
 
-chmod +x /opt/radicale/start.sh
+msg_info "Creating Service"
 cat <<EOF >/etc/systemd/system/radicale.service
+[Unit]
 Description=A simple CalDAV (calendar) and CardDAV (contact) server
 After=network.target
 Requires=network.target
 
 [Service]
-ExecStart=/opt/radicale/start.sh
+WorkingDirectory=/opt/radicale
+ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config
 Restart=on-failure
 # User=radicale
 # Deny other users access to the calendar data
@@ -15,16 +15,18 @@ update_os
 
 msg_info "Installing Dependencies"
 $STD apt install -y apt-transport-https
+curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
+cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
+Types: deb
+URIs: https://www.ui.com/downloads/unifi/debian
+Suites: stable
+Components: ubiquiti
+Architectures: amd64
+Signed-By: /usr/share/keyrings/unifi-repo.gpg
+EOF
+$STD apt update
 msg_ok "Installed Dependencies"
 
-setup_deb822_repo \
-"unifi" \
-"https://dl.ui.com/unifi/unifi-repo.gpg" \
-"https://www.ui.com/downloads/unifi/debian" \
-"stable" \
-"ubiquiti" \
-"amd64"
-
 JAVA_VERSION="21" setup_java
 
 if lscpu | grep -q 'avx'; then

misc/api.func (179 changed lines)

@@ -153,7 +153,7 @@ explain_exit_code() {
 126) echo "Command invoked cannot execute (permission problem?)" ;;
 127) echo "Command not found" ;;
 128) echo "Invalid argument to exit" ;;
-130) echo "Terminated by Ctrl+C (SIGINT)" ;;
+130) echo "Aborted by user (SIGINT)" ;;
 134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
 137) echo "Killed (SIGKILL / Out of memory?)" ;;
 139) echo "Segmentation fault (core dumped)" ;;
@@ -233,6 +233,43 @@ explain_exit_code() {
 esac
 }
 
+# ------------------------------------------------------------------------------
+# json_escape()
+#
+# - Escapes a string for safe JSON embedding
+# - Handles backslashes, quotes, newlines, tabs, and carriage returns
+# ------------------------------------------------------------------------------
+json_escape() {
+local s="$1"
+s=${s//\\/\\\\}
+s=${s//\"/\\\"}
+s=${s//$'\n'/\\n}
+s=${s//$'\r'/}
+s=${s//$'\t'/\\t}
+echo "$s"
+}
+
+# ------------------------------------------------------------------------------
+# get_error_text()
+#
+# - Returns last 20 lines of the active log (INSTALL_LOG or BUILD_LOG)
+# - Falls back to empty string if no log is available
+# ------------------------------------------------------------------------------
+get_error_text() {
+local logfile=""
+if declare -f get_active_logfile >/dev/null 2>&1; then
+logfile=$(get_active_logfile)
+elif [[ -n "${INSTALL_LOG:-}" ]]; then
+logfile="$INSTALL_LOG"
+elif [[ -n "${BUILD_LOG:-}" ]]; then
+logfile="$BUILD_LOG"
+fi
+
+if [[ -n "$logfile" && -s "$logfile" ]]; then
+tail -n 20 "$logfile" 2>/dev/null | sed 's/\r$//'
+fi
+}
+
 # ==============================================================================
 # SECTION 2: TELEMETRY FUNCTIONS
 # ==============================================================================
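To make the behaviour of the new helper concrete, here is a hedged, stand-alone sketch of json_escape in use; the sample error string is invented for illustration:

```bash
#!/usr/bin/env bash
# Minimal demonstration of the json_escape helper added above.
json_escape() {
  local s="$1"
  s=${s//\\/\\\\}   # backslash -> \\
  s=${s//\"/\\\"}   # double quote -> \"
  s=${s//$'\n'/\\n} # newline -> \n
  s=${s//$'\r'/}    # drop carriage returns
  s=${s//$'\t'/\\t} # tab -> \t
  echo "$s"
}

msg=$'disk "full": /dev/sda1\n\tno space left'
printf '{"error": "%s"}\n' "$(json_escape "$msg")"
# -> {"error": "disk \"full\": /dev/sda1\n\tno space left"}
```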
@@ -385,7 +422,8 @@ post_to_api() {
 detect_gpu
 fi
 local gpu_vendor="${GPU_VENDOR:-unknown}"
-local gpu_model="${GPU_MODEL:-}"
+local gpu_model
+gpu_model=$(json_escape "${GPU_MODEL:-}")
 local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
 
 # Detect CPU if not already set

@@ -393,7 +431,8 @@ post_to_api() {
 detect_cpu
 fi
 local cpu_vendor="${CPU_VENDOR:-unknown}"
-local cpu_model="${CPU_MODEL:-}"
+local cpu_model
+cpu_model=$(json_escape "${CPU_MODEL:-}")
 
 # Detect RAM if not already set
 if [[ -z "${RAM_SPEED:-}" ]]; then

@@ -484,7 +523,8 @@ post_to_api_vm() {
 detect_gpu
 fi
 local gpu_vendor="${GPU_VENDOR:-unknown}"
-local gpu_model="${GPU_MODEL:-}"
+local gpu_model
+gpu_model=$(json_escape "${GPU_MODEL:-}")
 local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
 
 # Detect CPU if not already set

@@ -492,7 +532,8 @@ post_to_api_vm() {
 detect_cpu
 fi
 local cpu_vendor="${CPU_VENDOR:-unknown}"
-local cpu_model="${CPU_MODEL:-}"
+local cpu_model
+cpu_model=$(json_escape "${CPU_MODEL:-}")
 
 # Detect RAM if not already set
 if [[ -z "${RAM_SPEED:-}" ]]; then
@@ -555,9 +596,12 @@ post_update_to_api() {
 # Silent fail - telemetry should never break scripts
 command -v curl &>/dev/null || return 0
 
-# Prevent duplicate submissions
+# Support "force" mode (3rd arg) to bypass duplicate check for retries after cleanup
+local force="${3:-}"
 POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}
-[[ "$POST_UPDATE_DONE" == "true" ]] && return 0
+if [[ "$POST_UPDATE_DONE" == "true" && "$force" != "force" ]]; then
+return 0
+fi
+
 [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
 [[ -z "${RANDOM_UUID:-}" ]] && return 0

@@ -568,12 +612,14 @@ post_update_to_api() {
 
 # Get GPU info (if detected)
 local gpu_vendor="${GPU_VENDOR:-unknown}"
-local gpu_model="${GPU_MODEL:-}"
+local gpu_model
+gpu_model=$(json_escape "${GPU_MODEL:-}")
 local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
 
 # Get CPU info (if detected)
 local cpu_vendor="${CPU_VENDOR:-unknown}"
-local cpu_model="${CPU_MODEL:-}"
+local cpu_model
+cpu_model=$(json_escape "${CPU_MODEL:-}")
 
 # Get RAM info (if detected)
 local ram_speed="${RAM_SPEED:-}"

@@ -595,13 +641,21 @@ post_update_to_api() {
 esac
 
 # For failed/unknown status, resolve exit code and error description
+local short_error=""
 if [[ "$pb_status" == "failed" ]] || [[ "$pb_status" == "unknown" ]]; then
 if [[ "$raw_exit_code" =~ ^[0-9]+$ ]]; then
 exit_code="$raw_exit_code"
 else
 exit_code=1
 fi
-error=$(explain_exit_code "$exit_code")
+local error_text=""
+error_text=$(get_error_text)
+if [[ -n "$error_text" ]]; then
+error=$(json_escape "$error_text")
+else
+error=$(json_escape "$(explain_exit_code "$exit_code")")
+fi
+short_error=$(json_escape "$(explain_exit_code "$exit_code")")
 error_category=$(categorize_error "$exit_code")
 [[ -z "$error" ]] && error="Unknown error"
 fi

@@ -618,8 +672,9 @@ post_update_to_api() {
 pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
 fi
 
-# Full payload including all fields - allows record creation if initial call failed
-# The Go service will find the record by random_id and PATCH, or create if not found
+local http_code=""
+
+# ── Attempt 1: Full payload with complete error text ──
 local JSON_PAYLOAD
 JSON_PAYLOAD=$(
 cat <<EOF

@@ -651,11 +706,80 @@ post_update_to_api() {
 EOF
 )
 
-# Fire-and-forget: never block, never fail
+http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
+-H "Content-Type: application/json" \
+-d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
+
+if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
+POST_UPDATE_DONE=true
+return 0
+fi
+
+# ── Attempt 2: Short error text (no full log) ──
+sleep 1
+local RETRY_PAYLOAD
+RETRY_PAYLOAD=$(
+cat <<EOF
+{
+"random_id": "${RANDOM_UUID}",
+"type": "${TELEMETRY_TYPE:-lxc}",
+"nsapp": "${NSAPP:-unknown}",
+"status": "${pb_status}",
+"ct_type": ${CT_TYPE:-1},
+"disk_size": ${DISK_SIZE:-0},
+"core_count": ${CORE_COUNT:-0},
+"ram_size": ${RAM_SIZE:-0},
+"os_type": "${var_os:-}",
+"os_version": "${var_version:-}",
+"pve_version": "${pve_version}",
+"method": "${METHOD:-default}",
+"exit_code": ${exit_code},
+"error": "${short_error}",
+"error_category": "${error_category}",
+"install_duration": ${duration},
+"cpu_vendor": "${cpu_vendor}",
+"cpu_model": "${cpu_model}",
+"gpu_vendor": "${gpu_vendor}",
+"gpu_model": "${gpu_model}",
+"gpu_passthrough": "${gpu_passthrough}",
+"ram_speed": "${ram_speed}",
+"repo_source": "${REPO_SOURCE}"
+}
+EOF
+)
+
+http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
+-H "Content-Type: application/json" \
+-d "$RETRY_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
+
+if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
+POST_UPDATE_DONE=true
+return 0
+fi
+
+# ── Attempt 3: Minimal payload (bare minimum to set status) ──
+sleep 2
+local MINIMAL_PAYLOAD
+MINIMAL_PAYLOAD=$(
+cat <<EOF
+{
+"random_id": "${RANDOM_UUID}",
+"type": "${TELEMETRY_TYPE:-lxc}",
+"nsapp": "${NSAPP:-unknown}",
+"status": "${pb_status}",
+"exit_code": ${exit_code},
+"error": "${short_error}",
+"error_category": "${error_category}",
+"install_duration": ${duration}
+}
+EOF
+)
+
 curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
 -H "Content-Type: application/json" \
--d "$JSON_PAYLOAD" -o /dev/null 2>&1 || true
+-d "$MINIMAL_PAYLOAD" -o /dev/null 2>/dev/null || true
+
+# Tried 3 times - mark as done regardless to prevent infinite loops
 POST_UPDATE_DONE=true
 }
@@ -691,6 +815,9 @@ categorize_error() {
 # Configuration errors
 203 | 204 | 205 | 206 | 207 | 208) echo "config" ;;
 
+# Aborted by user
+130) echo "aborted" ;;
+
 # Resource errors (OOM, etc)
 137 | 134) echo "resource" ;;
 
@@ -755,7 +882,13 @@ post_tool_to_api() {
 
 if [[ "$status" == "failed" ]]; then
 [[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
-error=$(explain_exit_code "$exit_code")
+local error_text=""
+error_text=$(get_error_text)
+if [[ -n "$error_text" ]]; then
+error=$(json_escape "$error_text")
+else
+error=$(json_escape "$(explain_exit_code "$exit_code")")
+fi
 error_category=$(categorize_error "$exit_code")
 fi
 

@@ -816,7 +949,13 @@ post_addon_to_api() {
 
 if [[ "$status" == "failed" ]]; then
 [[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
-error=$(explain_exit_code "$exit_code")
+local error_text=""
+error_text=$(get_error_text)
+if [[ -n "$error_text" ]]; then
+error=$(json_escape "$error_text")
+else
+error=$(json_escape "$(explain_exit_code "$exit_code")")
+fi
 error_category=$(categorize_error "$exit_code")
 fi
 

@@ -909,7 +1048,13 @@ post_update_to_api_extended() {
 else
 exit_code=1
 fi
-error=$(explain_exit_code "$exit_code")
+local error_text=""
+error_text=$(get_error_text)
+if [[ -n "$error_text" ]]; then
+error=$(json_escape "$error_text")
+else
+error=$(json_escape "$(explain_exit_code "$exit_code")")
+fi
 error_category=$(categorize_error "$exit_code")
 [[ -z "$error" ]] && error="Unknown error"
 fi
@@ -4046,12 +4046,10 @@ EOF'
 if [[ $install_exit_code -ne 0 ]]; then
 msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"
 
-# Report failure to telemetry API
-post_update_to_api "failed" "$install_exit_code"
-
-# Copy both logs from container before potential deletion
+# Copy install log from container BEFORE API call so get_error_text() can read it
 local build_log_copied=false
 local install_log_copied=false
+local host_install_log="/tmp/install-lxc-${CTID}-${SESSION_ID}.log"
+
 if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
 # Copy BUILD_LOG (creation log) if it exists

@@ -4059,15 +4057,22 @@ EOF'
 cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
 fi
 
-# Copy INSTALL_LOG from container
-if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
+# Copy INSTALL_LOG from container to host
+if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$host_install_log" 2>/dev/null; then
 install_log_copied=true
+# Point INSTALL_LOG to host copy so get_error_text() finds it
+INSTALL_LOG="$host_install_log"
 fi
+fi
+
+# Report failure to telemetry API (now with log available on host)
+post_update_to_api "failed" "$install_exit_code"
 
 # Show available logs
+if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
 echo ""
 [[ "$build_log_copied" == true ]] && echo -e "${GN}✔${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
-[[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
+[[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}${host_install_log}${CL}"
 fi
 
 # Dev mode: Keep container or open breakpoint shell

@@ -4125,6 +4130,10 @@ EOF'
 echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}"
 fi
 
+# Force one final status update attempt after cleanup
+# This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
+post_update_to_api "failed" "$install_exit_code" "force"
+
 exit $install_exit_code
 fi
 }
@@ -222,6 +222,12 @@ error_handler() {
|
|||||||
pct destroy "$CTID" &>/dev/null || true
|
pct destroy "$CTID" &>/dev/null || true
|
||||||
echo -e "${GN}✔${CL} Container ${CTID} removed"
|
echo -e "${GN}✔${CL} Container ${CTID} removed"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Force one final status update attempt after cleanup
|
||||||
|
# This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
|
||||||
|
if declare -f post_update_to_api &>/dev/null; then
|
||||||
|
post_update_to_api "failed" "$exit_code" "force"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
@@ -243,6 +249,18 @@ error_handler() {
|
|||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
on_exit() {
|
on_exit() {
|
||||||
local exit_code=$?
|
local exit_code=$?
|
||||||
|
# Report orphaned "installing" records to telemetry API
|
||||||
|
# Catches ALL exit paths: errors (non-zero), signals, AND clean exits where
|
||||||
|
# post_to_api was called ("installing" sent) but post_update_to_api was never called
|
||||||
|
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||||
|
if declare -f post_update_to_api >/dev/null 2>&1; then
|
||||||
|
if [[ $exit_code -ne 0 ]]; then
|
||||||
|
post_update_to_api "failed" "$exit_code"
|
||||||
|
else
|
||||||
|
post_update_to_api "failed" "1"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
|
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
|
||||||
exit "$exit_code"
|
exit "$exit_code"
|
||||||
}
|
}
|
||||||
@@ -255,6 +273,10 @@ on_exit() {
|
|||||||
# - Exits with code 130 (128 + SIGINT=2)
|
# - Exits with code 130 (128 + SIGINT=2)
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
on_interrupt() {
|
on_interrupt() {
|
||||||
|
# Report interruption to telemetry API (prevents stuck "installing" records)
|
||||||
|
if declare -f post_update_to_api >/dev/null 2>&1; then
|
||||||
|
post_update_to_api "failed" "130"
|
||||||
|
fi
|
||||||
if declare -f msg_error >/dev/null 2>&1; then
|
if declare -f msg_error >/dev/null 2>&1; then
|
||||||
msg_error "Interrupted by user (SIGINT)"
|
msg_error "Interrupted by user (SIGINT)"
|
||||||
else
|
else
|
||||||
@@ -272,6 +294,10 @@ on_interrupt() {
|
|||||||
# - Triggered by external process termination
|
# - Triggered by external process termination
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
on_terminate() {
|
on_terminate() {
|
||||||
|
# Report termination to telemetry API (prevents stuck "installing" records)
|
||||||
|
if declare -f post_update_to_api >/dev/null 2>&1; then
|
||||||
|
post_update_to_api "failed" "143"
|
||||||
|
fi
|
||||||
if declare -f msg_error >/dev/null 2>&1; then
|
if declare -f msg_error >/dev/null 2>&1; then
|
||||||
msg_error "Terminated by signal (SIGTERM)"
|
msg_error "Terminated by signal (SIGTERM)"
|
||||||
else
|
else
|
||||||
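
Taken together, the trap-handler hunks above make sure a script that already reported "installing" always reports a terminal status, whatever the exit path. A self-contained sketch of that guard pattern, with the real reporter replaced by a stub so the snippet runs on its own:

```bash
#!/usr/bin/env bash
# Sketch of the orphaned-record guard: every exit path reports a final status
# exactly once. post_update_to_api is stubbed; the flags mirror the diff above.

POST_TO_API_DONE=true     # would be set after the initial "installing" report
POST_UPDATE_DONE=false    # set once a final "done"/"failed" report went out

post_update_to_api() {    # stub standing in for the real reporter
  echo "report: status=$1 exit_code=$2"
  POST_UPDATE_DONE=true
}

on_exit() {
  local exit_code=$?
  # Catch every path where "installing" was sent but no final status followed,
  # including clean exits that simply never reported.
  if [[ "$POST_TO_API_DONE" == true && "$POST_UPDATE_DONE" != true ]]; then
    post_update_to_api "failed" "$((exit_code == 0 ? 1 : exit_code))"
  fi
  exit "$exit_code"
}

on_interrupt() { post_update_to_api "failed" 130; exit 130; }   # 128 + SIGINT
on_terminate() { post_update_to_api "failed" 143; exit 143; }   # 128 + SIGTERM

trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM

false   # simulate a failing last step; on_exit reports exit code 1
```

The `declare -f` checks in the real handlers simply make the report optional when the API helpers were never sourced.
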
@@ -465,6 +465,7 @@ manage_tool_repository() {
 msg_error "Failed to download MongoDB GPG key"
 return 1
 fi
+chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"
 
 # Setup repository
 local distro_codename
@@ -1294,12 +1295,33 @@ setup_deb822_repo() {
 return 1
 }
 
-# Import GPG
-curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
-msg_error "Failed to import GPG key for ${name}"
+# Import GPG key (auto-detect binary vs ASCII-armored format)
+local tmp_gpg
+tmp_gpg=$(mktemp) || return 1
+curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
+msg_error "Failed to download GPG key for ${name}"
+rm -f "$tmp_gpg"
 return 1
 }
+
+if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
+# ASCII-armored — dearmor to binary
+gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" < "$tmp_gpg" || {
+msg_error "Failed to dearmor GPG key for ${name}"
+rm -f "$tmp_gpg"
+return 1
+}
+else
+# Already in binary GPG format — copy directly
+cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
+msg_error "Failed to install GPG key for ${name}"
+rm -f "$tmp_gpg"
+return 1
+}
+fi
+rm -f "$tmp_gpg"
+chmod 644 "/etc/apt/keyrings/${name}.gpg"
 
 # Write deb822
 {
 echo "Types: deb"
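
The rewritten `setup_deb822_repo` import downloads the key to a temporary file first so it can handle publishers that ship either an ASCII-armored or an already-dearmored (binary) key. A condensed, standalone sketch of the same detection, with `msg_error` replaced by plain `echo` so it runs outside the repository's helpers:

```bash
#!/usr/bin/env bash
# Sketch: install an APT signing key whether it arrives ASCII-armored
# ("BEGIN PGP PUBLIC KEY BLOCK") or already in binary keyring format.
set -euo pipefail

import_apt_key() {
  local name="$1" gpg_url="$2"
  local keyring="/etc/apt/keyrings/${name}.gpg" tmp_gpg

  mkdir -p /etc/apt/keyrings
  tmp_gpg=$(mktemp)
  trap 'rm -f "$tmp_gpg"' RETURN

  curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
    echo "Failed to download GPG key for ${name}" >&2
    return 1
  }

  if grep -q "BEGIN PGP" "$tmp_gpg"; then
    gpg --dearmor --yes -o "$keyring" <"$tmp_gpg"   # armored -> binary
  else
    cp "$tmp_gpg" "$keyring"                        # already binary
  fi
  chmod 644 "$keyring"                              # must be world-readable for apt
}

# Example (Debian container, run as root), using the URL shape from the diff below:
# import_apt_key "tailscale" "https://pkgs.tailscale.com/stable/debian/bookworm.noarmor.gpg"
```
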
@@ -75,14 +75,37 @@ pct exec "$CTID" -- bash -c '
 set -e
 export DEBIAN_FRONTEND=noninteractive
 
-ID=$(grep "^ID=" /etc/os-release | cut -d"=" -f2)
-VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2)
+# Source os-release properly (handles quoted values)
+source /etc/os-release
 
-# fallback if DNS is poisoned or blocked
+# Fallback if DNS is poisoned or blocked
 ORIG_RESOLV="/etc/resolv.conf"
 BACKUP_RESOLV="/tmp/resolv.conf.backup"
 
-if ! dig +short pkgs.tailscale.com | grep -qvE "^127\.|^0\.0\.0\.0$"; then
+# Check DNS resolution using multiple methods (dig may not be installed)
+dns_check_failed=true
+if command -v dig &>/dev/null; then
+if dig +short pkgs.tailscale.com 2>/dev/null | grep -qvE "^127\.|^0\.0\.0\.0$|^$"; then
+dns_check_failed=false
+fi
+elif command -v host &>/dev/null; then
+if host pkgs.tailscale.com 2>/dev/null | grep -q "has address"; then
+dns_check_failed=false
+fi
+elif command -v nslookup &>/dev/null; then
+if nslookup pkgs.tailscale.com 2>/dev/null | grep -q "Address:"; then
+dns_check_failed=false
+fi
+elif command -v getent &>/dev/null; then
+if getent hosts pkgs.tailscale.com &>/dev/null; then
+dns_check_failed=false
+fi
+else
+# No DNS tools available, try curl directly and assume DNS works
+dns_check_failed=false
+fi
+
+if $dns_check_failed; then
 echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)."
 echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)"
 cp "$ORIG_RESOLV" "$BACKUP_RESOLV"
@@ -92,17 +115,22 @@ fi
 if ! command -v curl &>/dev/null; then
 echo "[INFO] curl not found, installing..."
 apt-get update -qq
-apt-get install -y curl >/dev/null
+apt update -qq
+apt install -y curl >/dev/null
 fi
 
-curl -fsSL https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg \
+# Ensure keyrings directory exists
+mkdir -p /usr/share/keyrings
+
+curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VERSION_CODENAME}.noarmor.gpg" \
 | tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
 
-echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
+echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VERSION_CODENAME} main" \
 >/etc/apt/sources.list.d/tailscale.list
 
 apt-get update -qq
-apt-get install -y tailscale >/dev/null
+apt update -qq
+apt install -y tailscale >/dev/null
 
 if [[ -f /tmp/resolv.conf.backup ]]; then
 echo "[INFO] Restoring original /etc/resolv.conf"
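
The Tailscale hunk replaces a hard dependency on `dig` with a probe that uses whichever resolver tool is installed, and only then falls back to overriding `/etc/resolv.conf`. The same logic as a small reusable function, keeping the "assume DNS works when no tool exists" fallback from the diff:

```bash
#!/usr/bin/env bash
# Sketch: does a hostname resolve to something other than a sinkhole
# (127.x.x.x / 0.0.0.0)? Tries dig, host, nslookup, then getent.

dns_resolves() {
  local host="$1"
  if command -v dig >/dev/null 2>&1; then
    dig +short "$host" 2>/dev/null | grep -qvE '^127\.|^0\.0\.0\.0$|^$'
  elif command -v host >/dev/null 2>&1; then
    host "$host" 2>/dev/null | grep -q "has address"
  elif command -v nslookup >/dev/null 2>&1; then
    nslookup "$host" 2>/dev/null | grep -q "Address:"
  elif command -v getent >/dev/null 2>&1; then
    getent hosts "$host" >/dev/null 2>&1
  else
    return 0   # no resolver tools at all: assume DNS works and let curl decide
  fi
}

if ! dns_resolves "pkgs.tailscale.com"; then
  echo "[INFO] DNS for pkgs.tailscale.com looks blocked; a temporary resolv.conf override may be needed."
fi
```
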
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -203,7 +203,6 @@ function exit-script() {
 
 function default_settings() {
 VMID=$(get_valid_nextid)
-FORMAT=",efitype=4m"
 MACHINE=""
 DISK_SIZE="4G"
 DISK_CACHE=""
@@ -259,11 +258,9 @@ function advanced_settings() {
 3>&1 1>&2 2>&3); then
 if [ "$MACH" = q35 ]; then
 echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
-FORMAT=""
 MACHINE=" -machine q35"
 else
 echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
-FORMAT=",efitype=4m"
 MACHINE=""
 fi
 else
@@ -476,31 +473,45 @@ case $STORAGE_TYPE in
 nfs | dir | cifs)
 DISK_EXT=".qcow2"
 DISK_REF="$VMID/"
-DISK_IMPORT="-format qcow2"
+DISK_IMPORT="--format qcow2"
 THIN=""
 ;;
 btrfs)
 DISK_EXT=".raw"
 DISK_REF="$VMID/"
-DISK_IMPORT="-format raw"
-FORMAT=",efitype=4m"
+DISK_IMPORT="--format raw"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="--format raw"
+;;
 esac
-for i in {0,1}; do
-disk="DISK$i"
-eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
-eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
-done
 
 msg_info "Creating a Arch Linux VM"
 qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
 -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
-pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
-qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
+if qm disk import --help >/dev/null 2>&1; then
+IMPORT_CMD=(qm disk import)
+else
+IMPORT_CMD=(qm importdisk)
+fi
+
+IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
+DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
+[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
+[[ -z "$DISK_REF_IMPORTED" ]] && {
+msg_error "Unable to determine imported disk reference."
+echo "$IMPORT_OUT"
+exit 1
+}
+msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
 
 qm set $VMID \
-  -efidisk0 ${DISK0_REF}${FORMAT} \
-  -scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
+  -efidisk0 ${STORAGE}:0,efitype=4m \
+  -scsi0 ${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,} \
   -ide2 ${STORAGE}:cloudinit \
   -boot order=scsi0 \
   -serial0 socket >/dev/null
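
The Arch Linux VM script now detects whether the host offers the newer `qm disk import` subcommand (falling back to the older `qm importdisk`) and recovers the actual volume ID from the import output rather than assuming a disk name, with `pvesm list` as a second chance. A sketch of that detection and parsing, assuming `VMID`, `FILE` and `STORAGE` are provided by the caller and that it runs on a Proxmox VE host:

```bash
#!/usr/bin/env bash
# Sketch: import a disk image and recover the volume ID it was stored under.
# VMID, FILE, STORAGE (and optionally DISK_IMPORT, e.g. "--format qcow2")
# are assumed to be set by the calling script.
set -euo pipefail
: "${VMID:?}" "${FILE:?}" "${STORAGE:?}"

# Newer PVE releases expose "qm disk import"; older ones only "qm importdisk".
if qm disk import --help >/dev/null 2>&1; then
  IMPORT_CMD=(qm disk import)
else
  IMPORT_CMD=(qm importdisk)
fi

IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$FILE" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"

# Primary: parse "... successfully imported disk 'local-lvm:vm-100-disk-0' ..."
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" |
  sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"

# Fallback (as in the diff): take the newest vm-<id>-disk-* volume on the storage.
if [[ -z "$DISK_REF" ]]; then
  DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" \
    '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
fi

if [[ -z "$DISK_REF" ]]; then
  echo "Unable to determine imported disk reference." >&2
  printf '%s\n' "$IMPORT_OUT"
  exit 1
fi
echo "Imported disk: $DISK_REF"
```

With the imported volume ID in hand, the earlier `pvesm alloc`/`eval DISK*_REF` bookkeeping and the `FORMAT` variable become unnecessary, since the EFI disk is now allocated directly via `-efidisk0 ${STORAGE}:0,efitype=4m`.
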
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -560,6 +560,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
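
This and the following VM scripts add a `*)` default arm to the storage-type `case`, so any storage backend the script does not list explicitly still ends up with usable import settings instead of leaving the variables empty. A minimal sketch of the pattern:

```bash
#!/usr/bin/env bash
# Sketch: per-storage-type disk settings with a catch-all default, so an
# unlisted storage plugin falls back to a raw, volume-style import instead
# of leaving DISK_EXT/DISK_REF/DISK_IMPORT unset.

VMID="${VMID:-100}"
STORAGE_TYPE="${STORAGE_TYPE:-rbd}"   # normally read from the selected storage

case "$STORAGE_TYPE" in
nfs | dir | cifs)
  DISK_EXT=".qcow2"
  DISK_REF="$VMID/"
  DISK_IMPORT="-format qcow2"
  ;;
btrfs)
  DISK_EXT=".raw"
  DISK_REF="$VMID/"
  DISK_IMPORT="-format raw"
  ;;
*)
  # LVM-thin, ZFS, Ceph RBD, third-party plugins: raw image on a volume
  DISK_EXT=""
  DISK_REF=""
  DISK_IMPORT="-format raw"
  ;;
esac

echo "ext='${DISK_EXT}' ref='${DISK_REF}' import='${DISK_IMPORT}'"
```
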
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -501,6 +501,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -45,7 +45,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
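
The recurring one-line change in these `error_handler` functions fixes what gets reported on failure: `post_update_to_api` expects the numeric exit code as its second argument, not the text of the failed command. A standalone sketch of the corrected handler wired to an `ERR` trap, with the reporter and cleanup stubbed:

```bash
#!/usr/bin/env bash
# Sketch: ERR-trap handler that reports the exit code (not the command text),
# matching the corrected VM scripts above. Reporter/cleanup are stubs.

post_update_to_api() { echo "report: status=$1 detail=$2"; }   # stub
cleanup_vmid()       { :; }                                    # stub

error_handler() {
  local exit_code="$?"
  local line_number="$1"
  local command="$2"
  post_update_to_api "failed" "$exit_code"   # previously: "$command" (wrong field)
  echo "[ERROR] line ${line_number}: exit code ${exit_code} while executing: ${command}" >&2
  cleanup_vmid
  exit "$exit_code"
}

set -E   # let the ERR trap fire inside functions and subshells as well
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR

false    # demo: triggers the handler and reports exit code 1
```
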
@@ -74,7 +74,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -71,7 +71,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -566,6 +566,11 @@ zfspool)
 DISK_REF=""
 DISK_IMPORT="-format raw"
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 
 DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}"
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -487,6 +487,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1,2}; do
 disk="DISK$i"
@@ -74,7 +74,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$command"
+post_update_to_api "failed" "$exit_code"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -48,7 +48,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$command"
+post_update_to_api "failed" "$exit_code"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -619,6 +619,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -71,7 +71,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -500,6 +500,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1,2}; do
 disk="DISK$i"
@@ -79,7 +79,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -402,6 +402,11 @@ nfs | dir)
 DISK_REF="$VMID/"
 DISK_IMPORT="-format qcow2"
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -66,7 +66,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$command"
+post_update_to_api "failed" "$exit_code"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -482,6 +482,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -69,7 +69,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$command"
+post_update_to_api "failed" "$exit_code"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -484,6 +484,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -68,7 +68,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$command"
+post_update_to_api "failed" "$exit_code"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -483,6 +483,11 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
+*)
+DISK_EXT=""
+DISK_REF=""
+DISK_IMPORT="-format raw"
+;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -69,7 +69,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${command}"
+post_update_to_api "failed" "${exit_code}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }