mirror of https://github.com/community-scripts/ProxmoxVE.git
synced 2026-02-17 02:33:26 +01:00

Compare commits (1 commit): feature/vm … hf_alpine_

| Author | SHA1 | Date | |
|---|---|---|---|
| | 2171f51569 | | |

.github/workflows/autolabeler.yml (generated, vendored) — 3 changes
@@ -100,8 +100,7 @@ jobs:
            // If it's an update script PR with json changes and a content label, skip adding website/json
            // The PR should be categorized as update script with the content label
            if (!(hasUpdateScript && hasJson && hasContentLabel)) {
              labelsToAdd.add("website");
              if (hasJson) labelsToAdd.add("json");
              labelsToAdd.add(hasJson ? "json" : "website");
            }
          }
CHANGELOG.md — 35 changes
@@ -406,47 +406,12 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit

## 2026-02-16

### 🆕 New Scripts

- RomM ([#11987](https://github.com/community-scripts/ProxmoxVE/pull/11987))
- LinkDing ([#11976](https://github.com/community-scripts/ProxmoxVE/pull/11976))

### 🚀 Updated Scripts

- #### 🐞 Bug Fixes

  - Tududi: Fix sed command for DB_FILE configuration [@tremor021](https://github.com/tremor021) ([#11988](https://github.com/community-scripts/ProxmoxVE/pull/11988))
  - slskd: fix exit position [@MickLesk](https://github.com/MickLesk) ([#11963](https://github.com/community-scripts/ProxmoxVE/pull/11963))
  - cryptpad: restore config earlier and run onlyoffice upgrade [@MickLesk](https://github.com/MickLesk) ([#11964](https://github.com/community-scripts/ProxmoxVE/pull/11964))
  - jellyseerr/overseerr: Migrate update script to Seerr; prompt rerun [@MickLesk](https://github.com/MickLesk) ([#11965](https://github.com/community-scripts/ProxmoxVE/pull/11965))

- #### 🔧 Refactor

  - core/vm's: ensure script state is sent on script exit [@MickLesk](https://github.com/MickLesk) ([#11991](https://github.com/community-scripts/ProxmoxVE/pull/11991))
  - Vaultwarden: export VW_VERSION as version number [@MickLesk](https://github.com/MickLesk) ([#11966](https://github.com/community-scripts/ProxmoxVE/pull/11966))
  - Zabbix: Improve zabbix-agent service detection [@MickLesk](https://github.com/MickLesk) ([#11968](https://github.com/community-scripts/ProxmoxVE/pull/11968))

### 💾 Core

- #### ✨ New Features

  - tools.func: ensure /usr/local/bin PATH persists for pct enter sessions [@MickLesk](https://github.com/MickLesk) ([#11970](https://github.com/community-scripts/ProxmoxVE/pull/11970))

- #### 🔧 Refactor

  - core: remove duplicate error handler from alpine-install.func [@MickLesk](https://github.com/MickLesk) ([#11971](https://github.com/community-scripts/ProxmoxVE/pull/11971))

### 📂 Github

- github: add "website" label if "json" changed [@MickLesk](https://github.com/MickLesk) ([#11975](https://github.com/community-scripts/ProxmoxVE/pull/11975))

### 🌐 Website

- #### 📝 Script Information

  - Update Wishlist LXC webpage to include reverse proxy info [@summoningpixels](https://github.com/summoningpixels) ([#11973](https://github.com/community-scripts/ProxmoxVE/pull/11973))
  - Update OpenCloud LXC webpage to include services ports [@summoningpixels](https://github.com/summoningpixels) ([#11969](https://github.com/community-scripts/ProxmoxVE/pull/11969))

## 2026-02-15

### 🆕 New Scripts
@@ -27,7 +27,7 @@ function update_script() {
    exit
  fi

  RELEASE=$(curl -fsSL https://teamspeak.com/en/downloads/#server | sed -n 's/.*teamspeak3-server_linux_amd64-\([0-9.]*[0-9]\).*/\1/p' | awk 'NR==1')
  set +o pipefail && RELEASE=$(curl -fsSL https://teamspeak.com/en/downloads/#server | sed -n 's/.*teamspeak3-server_linux_amd64-\([0-9.]*[0-9]\).*/\1/p' | head -1) && set -o pipefail

  if [ "${RELEASE}" != "$(cat ~/.teamspeak-server)" ] || [ ! -f ~/.teamspeak-server ]; then
    msg_info "Updating ${APP} LXC"

@@ -39,20 +39,17 @@ function update_script() {

    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "cryptpad" "cryptpad/cryptpad" "tarball"

    msg_info "Restoring configuration"
    mv /opt/config.js /opt/cryptpad/config/
    msg_ok "Configuration restored"

    msg_info "Updating CryptaPad"
    cd /opt/cryptpad
    $STD npm ci
    $STD npm run install:components
    if [ -f "/opt/cryptpad/install-onlyoffice.sh" ]; then
      $STD bash /opt/cryptpad/install-onlyoffice.sh --accept-license
    fi
    $STD npm run build
    msg_ok "Updated CryptaPad"

    msg_info "Restoring configuration"
    mv /opt/config.js /opt/cryptpad/config/
    msg_ok "Configuration restored"

    msg_info "Starting Service"
    systemctl start cryptpad
    msg_ok "Started Service"
@@ -1,6 +0,0 @@
 ___ __ ___
/ (_)___ / /______/ (_)___ ____ _
/ / / __ \/ //_/ __ / / __ \/ __ `/
/ / / / / / ,< / /_/ / / / / / /_/ /
/_/_/_/ /_/_/|_|\__,_/_/_/ /_/\__, /
/____/
@@ -1,6 +0,0 @@
 ____ __ ___
/ __ \____ ____ ___ / |/ /
/ /_/ / __ \/ __ `__ \/ /|_/ /
/ _, _/ /_/ / / / / / / / / /
/_/ |_|\____/_/ /_/ /_/_/ /_/
@@ -1,79 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (MickLesk)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://linkding.link/
|
||||
|
||||
APP="linkding"
|
||||
var_tags="${var_tags:-bookmarks;management}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-1024}"
|
||||
var_disk="${var_disk:-4}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/linkding ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if check_for_gh_release "linkding" "sissbruecker/linkding"; then
|
||||
msg_info "Stopping Services"
|
||||
systemctl stop nginx linkding linkding-tasks
|
||||
msg_ok "Stopped Services"
|
||||
|
||||
msg_info "Backing up Data"
|
||||
cp -r /opt/linkding/data /opt/linkding_data_backup
|
||||
cp /opt/linkding/.env /opt/linkding_env_backup
|
||||
msg_ok "Backed up Data"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding"
|
||||
|
||||
msg_info "Restoring Data"
|
||||
cp -r /opt/linkding_data_backup/. /opt/linkding/data
|
||||
cp /opt/linkding_env_backup /opt/linkding/.env
|
||||
rm -rf /opt/linkding_data_backup /opt/linkding_env_backup
|
||||
ln -sf /usr/lib/x86_64-linux-gnu/mod_icu.so /opt/linkding/libicu.so
|
||||
msg_ok "Restored Data"
|
||||
|
||||
msg_info "Updating LinkDing"
|
||||
cd /opt/linkding
|
||||
rm -f bookmarks/settings/dev.py
|
||||
touch bookmarks/settings/custom.py
|
||||
$STD npm ci
|
||||
$STD npm run build
|
||||
$STD uv sync --no-dev --frozen
|
||||
$STD uv pip install gunicorn
|
||||
set -a && source /opt/linkding/.env && set +a
|
||||
$STD /opt/linkding/.venv/bin/python manage.py migrate
|
||||
$STD /opt/linkding/.venv/bin/python manage.py collectstatic --no-input
|
||||
msg_ok "Updated LinkDing"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl start nginx linkding linkding-tasks
|
||||
msg_ok "Started Services"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed Successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9090${CL}"
|
||||
74
ct/romm.sh
74
ct/romm.sh
@@ -1,74 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (CanbiZ) | DevelopmentCats | AlphaLawless
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://romm.app
|
||||
|
||||
APP="RomM"
|
||||
var_tags="${var_tags:-emulation}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-4096}"
|
||||
var_disk="${var_disk:-20}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/romm ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if check_for_gh_release "romm" "rommapp/romm"; then
|
||||
msg_info "Stopping Services"
|
||||
systemctl stop romm-backend romm-worker romm-scheduler romm-watcher
|
||||
msg_ok "Stopped Services"
|
||||
|
||||
msg_info "Backing up configuration"
|
||||
cp /opt/romm/.env /opt/romm/.env.backup
|
||||
msg_ok "Backed up configuration"
|
||||
|
||||
fetch_and_deploy_gh_release "romm" "rommapp/romm" "tarball" "latest" "/opt/romm"
|
||||
|
||||
msg_info "Updating ROMM"
|
||||
cp /opt/romm/.env.backup /opt/romm/.env
|
||||
cd /opt/romm
|
||||
$STD uv sync --all-extras
|
||||
cd /opt/romm/backend
|
||||
$STD uv run alembic upgrade head
|
||||
cd /opt/romm/frontend
|
||||
$STD npm install
|
||||
$STD npm run build
|
||||
# Merge static assets into dist folder
|
||||
cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/
|
||||
mkdir -p /opt/romm/frontend/dist/assets/romm
|
||||
ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources
|
||||
ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets
|
||||
msg_ok "Updated ROMM"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl start romm-backend romm-worker romm-scheduler romm-watcher
|
||||
msg_ok "Started Services"
|
||||
msg_ok "Updated successfully"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
|
||||
@@ -83,7 +83,6 @@ function update_script() {
    msg_ok "Started Soularr Timer"
    msg_ok "Updated Soularr successfully!"
  fi
  exit
}

start

@@ -45,8 +45,6 @@ function update_script() {

  msg_info "Updating VaultWarden to $VAULT (Patience)"
  cd /tmp/vaultwarden-src
  VW_VERSION="$VAULT"
  export VW_VERSION
  $STD cargo build --features "sqlite,mysql,postgresql" --release
  if [[ -f /usr/bin/vaultwarden ]]; then
    cp target/release/vaultwarden /usr/bin/
ct/zabbix.sh — 15 changes
@@ -35,18 +35,15 @@ function update_script() {
    exit
  fi

  if systemctl cat zabbix-agent2.service &>/dev/null; then
  if systemctl list-unit-files | grep -q zabbix-agent2.service; then
    AGENT_SERVICE="zabbix-agent2"
  elif systemctl cat zabbix-agent.service &>/dev/null; then
    AGENT_SERVICE="zabbix-agent"
  else
    AGENT_SERVICE=""
    msg_warn "No Zabbix Agent service found, skipping agent actions"
    AGENT_SERVICE="zabbix-agent"
  fi

  msg_info "Stopping Services"
  systemctl stop zabbix-server
  [[ -n "$AGENT_SERVICE" ]] && systemctl stop "$AGENT_SERVICE"
  systemctl stop "$AGENT_SERVICE"
  msg_ok "Stopped Services"

  read -rp "Choose Zabbix version [1] 7.0 LTS [2] 7.4 (Latest Stable) [3] Latest available (default: 2): " ZABBIX_CHOICE
@@ -86,13 +83,13 @@ function update_script() {

  $STD apt install --only-upgrade zabbix-server-pgsql zabbix-frontend-php php8.4-pgsql

  if [[ "$AGENT_SERVICE" == "zabbix-agent2" ]]; then
  if [ "$AGENT_SERVICE" = "zabbix-agent2" ]; then
    $STD apt install --only-upgrade zabbix-agent2 zabbix-agent2-plugin-postgresql
    if [ -f /etc/zabbix/zabbix_agent2.d/plugins.d/nvidia.conf ]; then
      sed -i 's|^Plugins.NVIDIA.System.Path=.*|# Plugins.NVIDIA.System.Path=/usr/libexec/zabbix/zabbix-agent2-plugin-nvidia-gpu|' \
        /etc/zabbix/zabbix_agent2.d/plugins.d/nvidia.conf
    fi
  elif [[ "$AGENT_SERVICE" == "zabbix-agent" ]]; then
  else
    $STD apt install --only-upgrade zabbix-agent
  fi

@@ -108,7 +105,7 @@ function update_script() {

  msg_info "Starting Services"
  systemctl start zabbix-server
  [[ -n "$AGENT_SERVICE" ]] && systemctl start "$AGENT_SERVICE"
  systemctl start "$AGENT_SERVICE"
  systemctl restart apache2
  msg_ok "Started Services"
  msg_ok "Updated successfully!"
@@ -1,301 +0,0 @@
# VM Smart Recovery – Working Notes

**Branch:** `feature/vm-smart-recovery` (based on `main`)
**Related:** `feature/smart-error-recovery` (LXC, PR #11221)
**Created:** 2026-02-16

---

## 1. Starting Point

### Architecture comparison: LXC vs. VM

| Aspect | LXC (done in PR #11221) | VM (open) |
|---|---|---|
| Shared code | `misc/build.func` (5577 lines) | `misc/vm-core.func` (627 lines), **used only by `docker-vm.sh`** |
| Number of scripts | ~170 | 15 |
| Architecture | All use `build_container()` | **2 generations** (see below) |
| Software install | `pct exec` → install script inside the container | Varies: `virt-customize`, cloud-init, `qm sendkey`, or nothing at all |
| Telemetry | `post_to_api()` + `post_update_to_api()` | Identical: all source `misc/api.func` |
| Error handling | Centralized in the `build.func` traps | Each script has its own `error_handler()` |
| Recovery | Smart menu with 6 dynamic options | **None**: on failure the VM is destroyed immediately (`cleanup_vmid`) |

### Two generations of VM scripts

**Generation 1 – legacy (monolithic):** `haos-vm.sh`, `debian-vm.sh`, `openwrt-vm.sh`, and 11 more.
- Self-contained 500–700-line scripts
- Define **all** utility functions inline (colors, icons, `msg_info`/`msg_ok`, `error_handler`, `cleanup`, etc.)
- Source only `misc/api.func` for telemetry

**Generation 2 – modern (modular):** only `docker-vm.sh`.
- Sources three shared libraries:
  - `misc/api.func` – telemetry
  - `misc/vm-core.func` – shared utilities (627 lines)
  - `misc/cloud-init.func` – cloud-init configuration (709 lines)
- Calls `load_functions` from `vm-core.func`

### Telemetry data (top VM failures)

| Script | Share of VM failures |
|---|---|
| `docker-vm.sh` | 30.1 % |
| `openwrt-vm.sh` | 25.9 % |
| `debian-13-vm.sh` | 9.6 % |

---

## 2. Scope & Boundaries

### In scope

- Smart recovery for VM creation failures (retry menu analogous to LXC)
- Error detection: download, disk import, virt-customize, resource conflicts, network
- Exit-code mapping (already present in `api.func`, shared between both)

### Out of scope (deliberately)

- **Migrating all legacy scripts to `vm-core.func`** → separate refactoring ticket
- **In-VM repair** → VMs have no `pct exec` equivalent
- **`qm sendkey` recovery** (OpenWrt) → not retryable by design
- **APT/DPKG repair inside the VM** → no shell access during install

---

## 3. Software installation methods per script

| Script | Method | Description |
|---|---|---|
| `docker-vm.sh` | `virt-customize` | Offline image manipulation (libguestfs) |
| `docker-vm.sh` (fallback) | systemd first-boot service | Script runs inside the VM on first boot |
| `haos-vm.sh` | None | Pre-built appliance (qcow2) |
| `debian-vm.sh` / `debian-13-vm.sh` | None / cloud-init | Base cloud image |
| `openwrt-vm.sh` | `qm sendkey` | Virtual keyboard automation |
| `opnsense-vm.sh` | `qm sendkey` + bootstrap | Virtual keyboard |
| `ubuntu-*-vm.sh` | Cloud-init | User configures before start |
| `owncloud-vm.sh` | `virt-customize` | Same as docker-vm.sh |

---

## 4. Files & Changes

### 4.1 `misc/vm-core.func` – central recovery logic

#### New function: `vm_error_handler_with_recovery()`

```
Flow:
├── Capture exit code ($? FIRST, no ensure_log_on_host before it!)
├── Error classification:
│   ├── Download error (curl exit 6/7/22/28/35/52/56)
│   ├── Disk import error (qm importdisk, pvesm alloc)
│   ├── virt-customize error (libguestfs)
│   ├── Resource conflict (VMID exists, storage full)
│   └── Network error (DNS, timeout)
├── Smart recovery menu:
│   ├── [1] Retry (destroy the VM & recreate it)
│   ├── [2] Retry with different settings (change RAM/CPU/disk)
│   ├── [3] Keep the VM (do not destroy, debug manually)
│   ├── [4] Abort (destroy the VM, exit)
│   └── Dynamic options depending on the error type:
│       ├── Download error → "Clear cache & re-download"
│       └── Resource conflict → "Choose a different VMID"
└── On retry: cleanup_vmid() + call the create function again
```
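A minimal bash skeleton of this flow, for orientation only: the classification helpers are the detection functions listed in the next subsection, and `vm_show_recovery_menu` is the menu function sketched under 4.2; none of this is existing code.

```bash
# Sketch only: helper names and the menu function are planned, not an existing API.
trap 'vm_error_handler_with_recovery $LINENO' ERR

vm_error_handler_with_recovery() {
  local exit_code=$?   # must be the very first statement, otherwise $? is reset
  local line_no="${1:-?}"
  local log_file="${VM_ERROR_LOG:-/tmp/vm-install-${VMID:-unknown}.log}"

  # Classify the failure from the exit code and the captured stderr log.
  local error_class="generic"
  if is_download_error "$exit_code" "$log_file"; then
    error_class="download"
  elif is_disk_import_error "$log_file"; then
    error_class="disk-import"
  elif is_vmid_conflict "$log_file"; then
    error_class="vmid-conflict"
  elif is_storage_full "$log_file"; then
    error_class="storage-full"
  fi

  echo "Line ${line_no} failed with exit code ${exit_code} (class: ${error_class})"
  vm_show_recovery_menu "$exit_code" "$error_class"   # dynamic options depend on error_class
}
```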
#### New helper functions (error detection):

```bash
is_download_error()       # curl exit codes + HTTP 404/500
is_disk_import_error()    # qm importdisk stderr patterns
is_virt_customize_err()   # libguestfs error patterns
is_vmid_conflict()        # "already exists" in stderr
is_storage_full()         # "not enough space" patterns
```
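As a rough illustration of what two of these detectors could look like (the curl exit codes come from the list above; the grep patterns are assumptions, not the final implementation):

```bash
# Sketch: combine the curl exit code with a pattern match on the captured stderr log.
is_download_error() {
  local exit_code="$1" log_file="$2"
  case "$exit_code" in
    6 | 7 | 22 | 28 | 35 | 52 | 56) return 0 ;;   # DNS, connect, HTTP error, timeout, TLS, empty reply, recv error
  esac
  [[ -f "$log_file" ]] && grep -qiE 'error 404|error 5[0-9]{2}|could not resolve host' "$log_file"
}

is_vmid_conflict() {
  local log_file="$1"
  [[ -f "$log_file" ]] && grep -qi 'already exists' "$log_file"
}
```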
#### Log capture for VMs

Unlike LXC (where `/root/.install*.log` lives inside the container), VM errors have to be captured directly from the stderr of the `qm`/`virt-customize` commands:

```bash
# Every critical command captures stderr:
VM_ERROR_LOG="/tmp/vm-install-${VMID}.log"
qm importdisk "$VMID" "$IMAGE" "$STORAGE" 2>> "$VM_ERROR_LOG"
virt-customize -a "$IMAGE" --install docker.io 2>> "$VM_ERROR_LOG"
```

### 4.2 Retry wrapper architecture

Since VMs have no central `build_container()`, there are two approaches:

#### Option A: wrapper in `vm-core.func` (recommended for Gen-2 scripts)

```bash
vm_create_with_recovery() {
  local create_fn="$1"   # VM-specific creation function
  local max_retries=2
  local attempt=0
  local rc

  while true; do
    if "$create_fn"; then
      return 0           # success
    else
      rc=$?              # capture the failure code before anything else resets $?
    fi
    ((attempt++))
    if ((attempt >= max_retries)); then
      : # max retries reached → only "keep" or "abort" remain
    fi
    vm_show_recovery_menu "$rc" "$attempt"
    # Handle the menu selection...
  done
}
```

#### Option B: inline recovery in legacy scripts

For the 14 legacy scripts (until they are migrated to `vm-core.func`):
- Minimal patch: extend `error_handler()` with a recovery prompt (see the sketch after this section)
- Do **not** call `cleanup_vmid` immediately, only after the user has decided

**Recommendation:** Start with **`docker-vm.sh` only** (30.1 % of failures) using Option A. Treat the legacy scripts as phase 2, after the migration.
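A minimal sketch of that patch, assuming the usual shape of a legacy `error_handler()`; the prompt wording and the `recreate_vm` entry point are placeholders:

```bash
# Sketch for a legacy script: keep the VM until the user has decided what to do.
function error_handler() {
  local exit_code="$?"           # must stay the first line
  local line_number="$1"
  local command="$2"
  echo -e "Error on line ${line_number} (exit ${exit_code}) while executing: ${command}"

  local choice
  read -rp "VM creation failed. [r]etry, [k]eep the VM for debugging, [a]bort and destroy? " choice
  case "$choice" in
    r | R) cleanup_vmid; recreate_vm ;;   # recreate_vm: placeholder for the script's create flow
    k | K) echo "Keeping VM ${VMID} for manual inspection."; exit "$exit_code" ;;
    *)     cleanup_vmid; exit "$exit_code" ;;
  esac
}
```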
### 4.3 `misc/api.func` – no changes needed

Exit-code mapping (`explain_exit_code()`) and `categorize_error()` are already universal (LXC + VM). Once PR #11221 is merged, 70+ exit codes are available. If this branch is finished first, the codes can be cherry-picked from `feature/smart-error-recovery`.

---

## 5. Key Differences: LXC vs. VM Recovery

| LXC | VM |
|---|---|
| APT/DPKG in-place repair inside the container | **Not possible**: no shell access during install |
| OOM retry with 2× resources | **Works**: `qm set` can adjust RAM/CPU afterwards |
| DNS override inside the container (`/etc/resolv.conf`) | **Not applicable**: the VM has its own network stack |
| Container is kept during repair | VM must be **recreated from scratch** on retry |
| `build_container()` as the central retry loop | **New wrapper function needed** (`vm_create_with_recovery`) |
| `pct exec` for in-container access | No equivalent (qemu-guest-agent only if the VM is running and the agent is installed) |

---

## 6. Technical Pitfalls
### 6.1 VMID cleanup before retry

`cleanup_vmid` has to clean up completely:
- `qm stop "$VMID" --skiplock` (if running)
- `qm destroy "$VMID" --destroy-unreferenced-disks --purge`
- Some scripts create additional disks (`efidisk0`, `cloudinit`) that have to be removed separately; a sketch follows below
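A sketch of such a cleanup, built from the calls listed above; the guard around `qm stop` is an assumption about how the running state is detected:

```bash
# Sketch: destroy a half-created VM so the same VMID can be reused on retry.
cleanup_vmid() {
  local vmid="${1:-$VMID}"
  qm status "$vmid" &>/dev/null || return 0                     # VM was never created
  if qm status "$vmid" 2>/dev/null | grep -q running; then
    qm stop "$vmid" --skiplock &>/dev/null || true
  fi
  # --destroy-unreferenced-disks + --purge also remove extra volumes such as
  # efidisk0 or the cloud-init disk that were allocated along the way.
  qm destroy "$vmid" --destroy-unreferenced-disks --purge &>/dev/null || true
}
```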
### 6.2 Image caching

`docker-vm.sh` caches images in `/var/lib/vz/template/cache/`. On a download retry:
- **Keep** the cached file if the download completed (md5/sha check)
- **Delete** it if corruption is suspected (curl error, xz validation failed); see the sketch below
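A sketch of that decision, assuming a known-good checksum is available for the cached image (`IMAGE_NAME` and `EXPECTED_SHA256` are placeholders):

```bash
# Sketch: only reuse the cached image if it verifies against a known checksum.
CACHE_DIR="/var/lib/vz/template/cache"
IMAGE_FILE="${CACHE_DIR}/${IMAGE_NAME:?}"
if [[ -f "$IMAGE_FILE" && -n "${EXPECTED_SHA256:-}" ]] &&
  echo "${EXPECTED_SHA256}  ${IMAGE_FILE}" | sha256sum -c --status -; then
  echo "Cached image verified, reusing it for the retry."
else
  echo "Cached image missing or corrupt, removing it before re-downloading."
  rm -f "$IMAGE_FILE"
fi
```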
### 6.3 Cloud-init state

If cloud-init was partially configured, the whole state has to be reset on retry:
```bash
qm set "$VMID" --delete cicustom
qm set "$VMID" --delete ciuser
qm set "$VMID" --delete cipassword
```

### 6.4 Legacy scripts (14 of them)

- Define `error_handler()` inline and only source `api.func`
- To add recovery there, either:
  - **patch each script individually** (high risk, lots of duplication), or
  - **migrate to `vm-core.func` first** (cleaner, but a larger scope)
- **Recommendation:** prioritize the migration; recovery becomes trivial afterwards

### 6.5 `virt-customize` fallback

`docker-vm.sh` already ships a first-boot fallback for the Docker installation. If `virt-customize` fails:
- recovery should treat this as a **"soft failure"**
- actively offer the fallback instead of retrying blindly (see the sketch below)
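A sketch of how that could surface in the menu; `setup_first_boot_fallback` stands in for the fallback mechanism docker-vm.sh already has and is not a real function name:

```bash
# Sketch: offer the existing first-boot fallback instead of failing hard.
if ! virt-customize -a "$IMAGE" --install docker.io 2>>"$VM_ERROR_LOG"; then
  echo "virt-customize failed; Docker can still be installed via the first-boot service."
  read -rp "[f]irst-boot fallback, [r]etry virt-customize, [a]bort? " choice
  case "$choice" in
    f | F) setup_first_boot_fallback ;;                                    # placeholder helper
    r | R) virt-customize -a "$IMAGE" --install docker.io 2>>"$VM_ERROR_LOG" ;;
    *)     exit 1 ;;
  esac
fi
```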
### 6.6 No `pct exec` equivalent

- You **cannot "repair inside the VM"** the way you can with LXC
- `qm guest exec` does exist (via qemu-guest-agent), but it only works if:
  - the VM is running, and
  - the guest agent is installed
- and that is typically exactly the point at which the install fails

---

## 7. Implementation Order

| Phase | Task | Files | Impact |
|---|---|---|---|
| **Phase 1** | `vm_error_handler_with_recovery()` skeleton | `misc/vm-core.func` | Foundation for everything else |
| **Phase 2** | `docker-vm.sh`: integrate recovery | `vm/docker-vm.sh` | 30.1 % of failures |
| **Phase 3** | Error detection (download, import, virt-customize) | `misc/vm-core.func` | Intelligent, dynamic menu options |
| **Phase 4** | `haos-vm.sh`: integrate recovery (download retry) | `vm/haos-vm.sh` | Download-corruption handling already partially present |
| **Phase 5** | `debian-13-vm.sh` + `ubuntu-*-vm.sh` | `vm/debian-13-vm.sh`, etc. | Cloud-image scripts |
| **Phase 6** | `openwrt-vm.sh` (limited: download/import retry only) | `vm/openwrt-vm.sh` | The `sendkey` part is not retryable |

---

## 8. Test Matrix

| Scenario | Expected behavior |
|---|---|
| Download error (curl 6/7/28) | Menu: "Retry download" + "Clear cache" |
| Disk import error | Menu: "Retry" + "Choose a different storage" |
| VMID conflict | Menu: "Different VMID" + "Destroy existing VM" |
| virt-customize error (docker-vm) | Menu: "Retry" + "Use first-boot fallback" |
| Storage full | Menu: "Choose a different storage" + "Shrink disk" |
| Network timeout | Menu: "Retry" + "Abort" |
| 2× retries reached | Only "Keep VM" or "Abort" remain |
| User chooses "Keep VM" | Do not destroy the VM; explain how to access it manually |

---

## 9. Branch Workflow

```bash
# Create the new branch (already done):
git checkout main
git pull origin main
git checkout -b feature/vm-smart-recovery

# Commit the work in phases:
# Phase 1: git commit -m "feat(vm): add vm_error_handler_with_recovery to vm-core.func"
# Phase 2: git commit -m "feat(vm): integrate smart recovery into docker-vm.sh"
# etc.

# Open the PR against main (NOT against feature/smart-error-recovery)
```

### Dependency on PR #11221

The `api.func` changes from `feature/smart-error-recovery` (70+ exit codes, `categorize_error()`) will land in `main` automatically once PR #11221 is merged.

- If the VM branch is started **after** the PR #11221 merge → everything is already in place
- If the VM branch is finished **earlier** → cherry-pick the `api.func` codes from `feature/smart-error-recovery`

---

## 10. Reference: Exit-0 Bug (LXC only, fixed)

> This bug affects **only LXC** (`misc/build.func`), not the VM scripts.

**Root cause:** The ERR trap in `build.func` called `ensure_log_on_host` before `post_update_to_api`. Because `ensure_log_on_host` returns with exit 0, `$?` was reset to 0, so telemetry reported "failed/0" instead of the real exit code (~15-20 records/day).

**Fix (PR #11221, commit `2d7e707a0`):**
```bash
# Before (bug):
trap 'ensure_log_on_host; post_update_to_api "failed" "$?"' ERR

# After (fix):
trap '_ERR_CODE=$?; ensure_log_on_host; post_update_to_api "failed" "$_ERR_CODE"' ERR
```

**VM scripts are not affected:** they capture `$?` correctly as the first line of `error_handler()`:
```bash
function error_handler() {
  local exit_code="$?"  # first line → correct
  ...
}
```
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"generated": "2026-02-16T12:14:16Z",
|
||||
"generated": "2026-02-16T06:25:10Z",
|
||||
"versions": [
|
||||
{
|
||||
"slug": "2fauth",
|
||||
@@ -557,9 +557,9 @@
|
||||
{
|
||||
"slug": "immich-public-proxy",
|
||||
"repo": "alangrainger/immich-public-proxy",
|
||||
"version": "v1.15.2",
|
||||
"version": "v1.15.1",
|
||||
"pinned": false,
|
||||
"date": "2026-02-16T08:54:59Z"
|
||||
"date": "2026-01-26T08:04:27Z"
|
||||
},
|
||||
{
|
||||
"slug": "inspircd",
|
||||
@@ -585,9 +585,9 @@
|
||||
{
|
||||
"slug": "jackett",
|
||||
"repo": "Jackett/Jackett",
|
||||
"version": "v0.24.1127",
|
||||
"version": "v0.24.1124",
|
||||
"pinned": false,
|
||||
"date": "2026-02-16T08:43:41Z"
|
||||
"date": "2026-02-15T05:54:22Z"
|
||||
},
|
||||
{
|
||||
"slug": "jellystat",
|
||||
@@ -729,13 +729,6 @@
|
||||
"pinned": false,
|
||||
"date": "2025-11-16T22:40:18Z"
|
||||
},
|
||||
{
|
||||
"slug": "linkding",
|
||||
"repo": "sissbruecker/linkding",
|
||||
"version": "v1.45.0",
|
||||
"pinned": false,
|
||||
"date": "2026-01-06T20:31:04Z"
|
||||
},
|
||||
{
|
||||
"slug": "linkstack",
|
||||
"repo": "linkstackorg/linkstack",
|
||||
@@ -970,9 +963,9 @@
|
||||
{
|
||||
"slug": "ots",
|
||||
"repo": "Luzifer/ots",
|
||||
"version": "v1.21.1",
|
||||
"version": "v1.21.0",
|
||||
"pinned": false,
|
||||
"date": "2026-02-16T12:12:23Z"
|
||||
"date": "2026-01-19T23:21:29Z"
|
||||
},
|
||||
{
|
||||
"slug": "outline",
|
||||
@@ -1019,9 +1012,9 @@
|
||||
{
|
||||
"slug": "paperless-gpt",
|
||||
"repo": "icereed/paperless-gpt",
|
||||
"version": "v0.25.0",
|
||||
"version": "v0.24.0",
|
||||
"pinned": false,
|
||||
"date": "2026-02-16T08:31:48Z"
|
||||
"date": "2026-01-14T21:28:09Z"
|
||||
},
|
||||
{
|
||||
"slug": "paperless-ngx",
|
||||
@@ -1275,13 +1268,6 @@
|
||||
"pinned": false,
|
||||
"date": "2025-03-28T13:00:23Z"
|
||||
},
|
||||
{
|
||||
"slug": "romm",
|
||||
"repo": "RetroAchievements/RALibretro",
|
||||
"version": "1.8.2",
|
||||
"pinned": false,
|
||||
"date": "2026-01-23T17:03:31Z"
|
||||
},
|
||||
{
|
||||
"slug": "rustdeskserver",
|
||||
"repo": "rustdesk/rustdesk-server",
|
||||
@@ -1334,9 +1320,9 @@
|
||||
{
|
||||
"slug": "semaphore",
|
||||
"repo": "semaphoreui/semaphore",
|
||||
"version": "v2.17.2",
|
||||
"version": "v2.17.0",
|
||||
"pinned": false,
|
||||
"date": "2026-02-16T10:27:40Z"
|
||||
"date": "2026-02-13T21:08:30Z"
|
||||
},
|
||||
{
|
||||
"slug": "shelfmark",
|
||||
@@ -1740,9 +1726,9 @@
|
||||
{
|
||||
"slug": "zitadel",
|
||||
"repo": "zitadel/zitadel",
|
||||
"version": "v4.11.0",
|
||||
"version": "v4.10.1",
|
||||
"pinned": false,
|
||||
"date": "2026-02-16T09:48:38Z"
|
||||
"date": "2026-01-30T06:52:53Z"
|
||||
},
|
||||
{
|
||||
"slug": "zoraxy",
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
{
|
||||
"name": "linkding",
|
||||
"slug": "linkding",
|
||||
"categories": [
|
||||
12
|
||||
],
|
||||
"date_created": "2026-02-16",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 9090,
|
||||
"documentation": "https://linkding.link/",
|
||||
"website": "https://linkding.link/",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linkding.webp",
|
||||
"config_path": "/opt/linkding/.env",
|
||||
"description": "linkding is a self-hosted bookmark manager that is designed to be minimal, fast, and easy to set up. It features a clean UI, tag-based organization, bulk editing, Markdown notes, read it later functionality, sharing, REST API, and browser extensions for Firefox and Chrome.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/linkding.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 1024,
|
||||
"hdd": 4,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": "admin",
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "Admin credentials are stored in /opt/linkding/.env",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "Valid TLS certificates and fully-qualified domain names behind a reverse proxy (Caddy) for 3 services - OpenCloud (port: 9200), Collabora (port: 9980), and WOPI (port: 9300) are **REQUIRED**",
|
||||
"text": "Valid TLS certificates and fully-qualified domain names behind a reverse proxy (Caddy) for 3 services - OpenCloud, Collabora, and WOPI are **REQUIRED**",
|
||||
"type": "warning"
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
{
|
||||
"name": "RomM",
|
||||
"slug": "romm",
|
||||
"categories": [
|
||||
24
|
||||
],
|
||||
"date_created": "2026-02-16",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 80,
|
||||
"documentation": "https://docs.romm.app/latest/",
|
||||
"website": "https://romm.app/",
|
||||
"config_path": "/opt/romm/.env",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/romm.webp",
|
||||
"description": "RomM (ROM Manager) allows you to scan, enrich, browse and play your game collection with a clean and responsive interface. Support for multiple platforms, various naming schemes, and custom tags.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/romm.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 4096,
|
||||
"hdd": 20,
|
||||
"os": "debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": []
|
||||
}
|
||||
@@ -1,40 +1,35 @@
|
||||
{
|
||||
"name": "Wishlist",
|
||||
"slug": "wishlist",
|
||||
"categories": [
|
||||
12
|
||||
],
|
||||
"date_created": "2026-02-04",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 3280,
|
||||
"documentation": "https://github.com/cmintey/wishlist/blob/main/README.md#getting-started",
|
||||
"website": "https://github.com/cmintey/wishlist",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cmintey-wishlist.webp",
|
||||
"config_path": "/opt/wishlist/.env",
|
||||
"description": "Wishlist is a self-hosted wishlist application that you can share with your friends and family. You no longer have to wonder what to get your family for the holidays, simply check their wishlist and claim any available item!",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/wishlist.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 2048,
|
||||
"hdd": 5,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "When using a reverse proxy with this script, please edit the`ORIGIN` value in `/opt/wishlist/.env` to point to your new URL, otherwise creating an admin account or logging in will not work.",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
"name": "Wishlist",
|
||||
"slug": "wishlist",
|
||||
"categories": [
|
||||
12
|
||||
],
|
||||
"date_created": "2026-02-04",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 3280,
|
||||
"documentation": "https://github.com/cmintey/wishlist/blob/main/README.md#getting-started",
|
||||
"config_path": "/opt/wishlist/.env",
|
||||
"website": "https://github.com/cmintey/wishlist",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cmintey-wishlist.webp",
|
||||
"description": "Wishlist is a self-hosted wishlist application that you can share with your friends and family. You no longer have to wonder what to get your family for the holidays, simply check their wishlist and claim any available item!",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/wishlist.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 2048,
|
||||
"hdd": 5,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": []
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ $STD apk add --no-cache \
|
||||
libc6-compat
|
||||
msg_ok "Installed dependencies"
|
||||
|
||||
RELEASE=$(curl -fsSL https://teamspeak.com/en/downloads/#server | sed -n 's/.*teamspeak3-server_linux_amd64-\([0-9.]*[0-9]\).*/\1/p' | awk 'NR==1')
|
||||
RELEASE=$(curl -fsSL https://teamspeak.com/en/downloads/#server | sed -n 's/.*teamspeak3-server_linux_amd64-\([0-9.]*[0-9]\).*/\1/p' | head -1)
|
||||
msg_info "Installing Teamspeak Server v${RELEASE}"
|
||||
mkdir -p /opt/teamspeak-server
|
||||
cd /opt/teamspeak-server
|
||||
|
||||
@@ -26,13 +26,13 @@ msg_info "Setup CryptPad"
|
||||
cd /opt/cryptpad
|
||||
$STD npm ci
|
||||
$STD npm run install:components
|
||||
if [[ "$onlyoffice" =~ ^[Yy]$ ]]; then
|
||||
$STD bash -c "./install-onlyoffice.sh --accept-license"
|
||||
fi
|
||||
$STD npm run build
|
||||
cp config/config.example.js config/config.js
|
||||
sed -i "51s/localhost/${LOCAL_IP}/g" /opt/cryptpad/config/config.js
|
||||
sed -i "80s#//httpAddress: 'localhost'#httpAddress: '0.0.0.0'#g" /opt/cryptpad/config/config.js
|
||||
$STD npm run build
|
||||
if [[ "$onlyoffice" =~ ^[Yy]$ ]]; then
|
||||
$STD bash -c "./install-onlyoffice.sh --accept-license"
|
||||
fi
|
||||
msg_ok "Setup CryptPad"
|
||||
|
||||
msg_info "Creating Service"
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (MickLesk)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://linkding.link/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3-dev \
|
||||
nginx \
|
||||
libpq-dev \
|
||||
libicu-dev \
|
||||
libsqlite3-dev \
|
||||
libffi-dev
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
NODE_VERSION="22" setup_nodejs
|
||||
setup_uv
|
||||
fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding"
|
||||
|
||||
msg_info "Building Frontend"
|
||||
cd /opt/linkding
|
||||
$STD npm ci
|
||||
$STD npm run build
|
||||
ln -sf /usr/lib/x86_64-linux-gnu/mod_icu.so /opt/linkding/libicu.so
|
||||
msg_ok "Built Frontend"
|
||||
|
||||
msg_info "Setting up LinkDing"
|
||||
rm -f bookmarks/settings/dev.py
|
||||
touch bookmarks/settings/custom.py
|
||||
$STD uv sync --no-dev --frozen
|
||||
$STD uv pip install gunicorn
|
||||
mkdir -p data/{favicons,previews,assets}
|
||||
ADMIN_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
|
||||
cat <<EOF >/opt/linkding/.env
|
||||
LD_SUPERUSER_NAME=admin
|
||||
LD_SUPERUSER_PASSWORD=${ADMIN_PASS}
|
||||
LD_CSRF_TRUSTED_ORIGINS=http://${LOCAL_IP}:9090
|
||||
EOF
|
||||
set -a && source /opt/linkding/.env && set +a
|
||||
$STD /opt/linkding/.venv/bin/python manage.py generate_secret_key
|
||||
$STD /opt/linkding/.venv/bin/python manage.py migrate
|
||||
$STD /opt/linkding/.venv/bin/python manage.py enable_wal
|
||||
$STD /opt/linkding/.venv/bin/python manage.py create_initial_superuser
|
||||
$STD /opt/linkding/.venv/bin/python manage.py collectstatic --no-input
|
||||
msg_ok "Set up LinkDing"
|
||||
|
||||
msg_info "Creating Services"
|
||||
cat <<EOF >/etc/systemd/system/linkding.service
|
||||
[Unit]
|
||||
Description=linkding Bookmark Manager
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=root
|
||||
WorkingDirectory=/opt/linkding
|
||||
EnvironmentFile=/opt/linkding/.env
|
||||
ExecStart=/opt/linkding/.venv/bin/gunicorn \
|
||||
--bind 127.0.0.1:8000 \
|
||||
--workers 3 \
|
||||
--threads 2 \
|
||||
--timeout 120 \
|
||||
bookmarks.wsgi:application
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
cat <<EOF >/etc/systemd/system/linkding-tasks.service
|
||||
[Unit]
|
||||
Description=linkding Background Tasks
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=root
|
||||
WorkingDirectory=/opt/linkding
|
||||
EnvironmentFile=/opt/linkding/.env
|
||||
ExecStart=/opt/linkding/.venv/bin/python manage.py run_huey
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
cat <<'EOF' >/etc/nginx/sites-available/linkding
|
||||
server {
|
||||
listen 9090;
|
||||
server_name _;
|
||||
|
||||
client_max_body_size 20M;
|
||||
|
||||
location /static/ {
|
||||
alias /opt/linkding/static/;
|
||||
expires 30d;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_redirect off;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
$STD rm -f /etc/nginx/sites-enabled/default
|
||||
$STD ln -sf /etc/nginx/sites-available/linkding /etc/nginx/sites-enabled/linkding
|
||||
systemctl enable -q --now nginx linkding linkding-tasks
|
||||
systemctl restart nginx
|
||||
msg_ok "Created Services"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@@ -1,344 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (CanbiZ) | DevelopmentCats | AlphaLawless
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://romm.app
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
acl \
|
||||
git \
|
||||
build-essential \
|
||||
libssl-dev \
|
||||
libffi-dev \
|
||||
libmagic-dev \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
python3-venv \
|
||||
libmariadb3 \
|
||||
libmariadb-dev \
|
||||
libpq-dev \
|
||||
libbz2-dev \
|
||||
libreadline-dev \
|
||||
libsqlite3-dev \
|
||||
zlib1g-dev \
|
||||
liblzma-dev \
|
||||
libncurses5-dev \
|
||||
libncursesw5-dev \
|
||||
redis-server \
|
||||
redis-tools \
|
||||
p7zip-full \
|
||||
tzdata \
|
||||
nginx
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
PYTHON_VERSION="3.13" setup_uv
|
||||
NODE_VERSION="22" setup_nodejs
|
||||
setup_mariadb
|
||||
MARIADB_DB_NAME="romm" MARIADB_DB_USER="romm" setup_mariadb_db
|
||||
|
||||
msg_info "Creating directories"
|
||||
mkdir -p /opt/romm \
|
||||
/var/lib/romm/config \
|
||||
/var/lib/romm/resources \
|
||||
/var/lib/romm/assets/{saves,states,screenshots} \
|
||||
/var/lib/romm/library/roms \
|
||||
/var/lib/romm/library/bios
|
||||
msg_ok "Created directories"
|
||||
|
||||
msg_info "Creating configuration file"
|
||||
cat <<'EOF' >/var/lib/romm/config/config.yml
|
||||
# RomM Configuration File
|
||||
# Documentation: https://docs.romm.app/latest/Getting-Started/Configuration-File/
|
||||
# Only uncomment the lines you want to use/modify
|
||||
|
||||
# exclude:
|
||||
# platforms:
|
||||
# - excluded_folder_a
|
||||
# roms:
|
||||
# single_file:
|
||||
# extensions:
|
||||
# - xml
|
||||
# - txt
|
||||
# names:
|
||||
# - '._*'
|
||||
# - '*.nfo'
|
||||
# multi_file:
|
||||
# names:
|
||||
# - downloaded_media
|
||||
# - media
|
||||
|
||||
# system:
|
||||
# platforms:
|
||||
# gc: ngc
|
||||
# ps1: psx
|
||||
|
||||
# The folder name where your roms are located (relative to library path)
|
||||
# filesystem:
|
||||
# roms_folder: 'roms'
|
||||
|
||||
# scan:
|
||||
# priority:
|
||||
# metadata:
|
||||
# - "igdb"
|
||||
# - "moby"
|
||||
# - "ss"
|
||||
# - "ra"
|
||||
# artwork:
|
||||
# - "igdb"
|
||||
# - "moby"
|
||||
# - "ss"
|
||||
# region:
|
||||
# - "us"
|
||||
# - "eu"
|
||||
# - "jp"
|
||||
# language:
|
||||
# - "en"
|
||||
# media:
|
||||
# - box2d
|
||||
# - box3d
|
||||
# - screenshot
|
||||
# - manual
|
||||
|
||||
# emulatorjs:
|
||||
# debug: false
|
||||
# cache_limit: null
|
||||
EOF
|
||||
chmod 644 /var/lib/romm/config/config.yml
|
||||
msg_ok "Created configuration file"
|
||||
|
||||
fetch_and_deploy_gh_release "RAHasher" "RetroAchievements/RALibretro" "prebuild" "latest" "/opt/RALibretro" "RAHasher-x64-Linux-*.zip"
|
||||
cp /opt/RALibretro/RAHasher /usr/bin/RAHasher
|
||||
chmod +x /usr/bin/RAHasher
|
||||
|
||||
fetch_and_deploy_gh_release "romm" "rommapp/romm"
|
||||
|
||||
msg_info "Creating environment file"
|
||||
sed -i 's/^supervised no/supervised systemd/' /etc/redis/redis.conf
|
||||
systemctl restart redis-server
|
||||
systemctl enable -q --now redis-server
|
||||
AUTH_SECRET_KEY=$(openssl rand -hex 32)
|
||||
|
||||
cat <<EOF >/opt/romm/.env
|
||||
ROMM_BASE_PATH=/var/lib/romm
|
||||
ROMM_CONFIG_PATH=/var/lib/romm/config/config.yml
|
||||
WEB_CONCURRENCY=4
|
||||
|
||||
DB_HOST=127.0.0.1
|
||||
DB_PORT=3306
|
||||
DB_NAME=$MARIADB_DB_NAME
|
||||
DB_USER=$MARIADB_DB_USER
|
||||
DB_PASSWD=$MARIADB_DB_PASS
|
||||
|
||||
REDIS_HOST=127.0.0.1
|
||||
REDIS_PORT=6379
|
||||
|
||||
ROMM_AUTH_SECRET_KEY=$AUTH_SECRET_KEY
|
||||
DISABLE_DOWNLOAD_ENDPOINT_AUTH=false
|
||||
DISABLE_CSRF_PROTECTION=false
|
||||
|
||||
ENABLE_RESCAN_ON_FILESYSTEM_CHANGE=true
|
||||
RESCAN_ON_FILESYSTEM_CHANGE_DELAY=5
|
||||
|
||||
ENABLE_SCHEDULED_RESCAN=true
|
||||
SCHEDULED_RESCAN_CRON=0 3 * * *
|
||||
ENABLE_SCHEDULED_UPDATE_SWITCH_TITLEDB=true
|
||||
SCHEDULED_UPDATE_SWITCH_TITLEDB_CRON=0 4 * * *
|
||||
|
||||
LOGLEVEL=INFO
|
||||
EOF
|
||||
|
||||
chmod 600 /opt/romm/.env
|
||||
msg_ok "Created environment file"
|
||||
|
||||
msg_info "Setting up RomM Backend"
|
||||
cd /opt/romm
|
||||
export UV_CONCURRENT_DOWNLOADS=1
|
||||
$STD uv sync --all-extras
|
||||
cd /opt/romm/backend
|
||||
$STD uv run alembic upgrade head
|
||||
msg_ok "Set up RomM Backend"
|
||||
|
||||
msg_info "Setting up RomM Frontend"
|
||||
cd /opt/romm/frontend
|
||||
$STD npm install
|
||||
$STD npm run build
|
||||
|
||||
cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/
|
||||
|
||||
mkdir -p /opt/romm/frontend/dist/assets/romm
|
||||
ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources
|
||||
ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets
|
||||
msg_ok "Set up RomM Frontend"
|
||||
|
||||
msg_info "Configuring Nginx"
|
||||
cat <<'EOF' >/etc/nginx/sites-available/romm
|
||||
upstream romm_backend {
|
||||
server 127.0.0.1:5000;
|
||||
}
|
||||
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
root /opt/romm/frontend/dist;
|
||||
client_max_body_size 0;
|
||||
|
||||
# Frontend SPA
|
||||
location / {
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
# Static assets
|
||||
location /assets {
|
||||
alias /opt/romm/frontend/dist/assets;
|
||||
try_files $uri $uri/ =404;
|
||||
expires 1y;
|
||||
add_header Cache-Control "public, immutable";
|
||||
}
|
||||
|
||||
# EmulatorJS player - requires COOP/COEP headers for SharedArrayBuffer
|
||||
location ~ ^/rom/.*/ejs$ {
|
||||
add_header Cross-Origin-Embedder-Policy "require-corp";
|
||||
add_header Cross-Origin-Opener-Policy "same-origin";
|
||||
try_files $uri /index.html;
|
||||
}
|
||||
|
||||
# Backend API
|
||||
location /api {
|
||||
proxy_pass http://romm_backend;
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
# WebSocket and Netplay
|
||||
location ~ ^/(ws|netplay) {
|
||||
proxy_pass http://romm_backend;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Host $host;
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
|
||||
# OpenAPI docs
|
||||
location = /openapi.json {
|
||||
proxy_pass http://romm_backend;
|
||||
}
|
||||
|
||||
# Internal library file serving
|
||||
location /library/ {
|
||||
internal;
|
||||
alias /var/lib/romm/library/;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
rm -f /etc/nginx/sites-enabled/default
|
||||
ln -sf /etc/nginx/sites-available/romm /etc/nginx/sites-enabled/romm
|
||||
systemctl restart nginx
|
||||
systemctl enable -q --now nginx
|
||||
msg_ok "Configured Nginx"
|
||||
|
||||
msg_info "Creating Services"
|
||||
cat <<EOF >/etc/systemd/system/romm-backend.service
|
||||
[Unit]
|
||||
Description=RomM Backend
|
||||
After=network.target mariadb.service redis-server.service
|
||||
Requires=mariadb.service redis-server.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/romm/backend
|
||||
EnvironmentFile=/opt/romm/.env
|
||||
Environment="PYTHONPATH=/opt/romm"
|
||||
ExecStart=/opt/romm/.venv/bin/python main.py
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
cat <<EOF >/etc/systemd/system/romm-worker.service
|
||||
[Unit]
|
||||
Description=RomM RQ Worker
|
||||
After=network.target mariadb.service redis-server.service romm-backend.service
|
||||
Requires=mariadb.service redis-server.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/romm/backend
|
||||
EnvironmentFile=/opt/romm/.env
|
||||
Environment="PYTHONPATH=/opt/romm/backend"
|
||||
ExecStart=/opt/romm/.venv/bin/rq worker --path /opt/romm/backend --url redis://127.0.0.1:6379/0 high default low
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
cat <<EOF >/etc/systemd/system/romm-scheduler.service
|
||||
[Unit]
|
||||
Description=RomM RQ Scheduler
|
||||
After=network.target mariadb.service redis-server.service romm-backend.service
|
||||
Requires=mariadb.service redis-server.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/romm/backend
|
||||
EnvironmentFile=/opt/romm/.env
|
||||
Environment="PYTHONPATH=/opt/romm/backend"
|
||||
Environment="RQ_REDIS_HOST=127.0.0.1"
|
||||
Environment="RQ_REDIS_PORT=6379"
|
||||
ExecStart=/opt/romm/.venv/bin/rqscheduler --path /opt/romm/backend
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
cat <<EOF >/etc/systemd/system/romm-watcher.service
|
||||
[Unit]
|
||||
Description=RomM Filesystem Watcher
|
||||
After=network.target romm-backend.service
|
||||
Requires=romm-backend.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/romm/backend
|
||||
EnvironmentFile=/opt/romm/.env
|
||||
Environment="PYTHONPATH=/opt/romm/backend"
|
||||
ExecStart=/opt/romm/.venv/bin/watchfiles --target-type command '/opt/romm/.venv/bin/python watcher.py' /var/lib/romm/library
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
systemctl enable -q --now romm-backend romm-worker romm-scheduler romm-watcher
|
||||
msg_ok "Created Services"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@@ -38,8 +38,8 @@ SECRET="$(openssl rand -hex 64)"
sed -e '/^NODE_ENV=/s/=.*$/=production/' \
  -e 's/^TUDUDI_USER/# TUDUDI_USER/g' \
  -e "/_SECRET=/s/=.*$/=${SECRET}/" \
  -e '/^# DB_FILE=/s/^# //' \
  -e "s|^DB_FILE=.*|DB_FILE=${DB_LOCATION}/production.sqlite3|" \
  -e "/^# DB_FILE/s/^# //; \
      \|DB_FILE|s|/path.*$|${DB_LOCATION}/production.sqlite3|" \
  -e "/^# TUDUDI_ALLOWED/s/^# //; \
      \|_ORIGINS=|s|=.*$|=<your tududi IP or FDQN>|" \
  -e "/^# TUDUDI_UPLOAD/s/^# //; \

@@ -29,8 +29,6 @@ fetch_and_deploy_gh_release "vaultwarden" "dani-garcia/vaultwarden" "tarball" "l

msg_info "Building Vaultwarden (Patience)"
cd /tmp/vaultwarden-src
VW_VERSION=$(get_latest_github_release "dani-garcia/vaultwarden")
export VW_VERSION
$STD cargo build --features "sqlite,mysql,postgresql" --release
msg_ok "Built Vaultwarden"

@@ -34,19 +34,11 @@ net_resolves() {
}

ensure_usr_local_bin_persist() {
  # Login shells: /etc/profile.d/
  local PROFILE_FILE="/etc/profile.d/10-localbin.sh"
  if [ ! -f "$PROFILE_FILE" ]; then
    echo 'case ":$PATH:" in *:/usr/local/bin:*) ;; *) export PATH="/usr/local/bin:$PATH";; esac' >"$PROFILE_FILE"
    chmod +x "$PROFILE_FILE"
  fi

  # Non-login shells (pct enter): /root/.profile and /root/.bashrc
  for rc_file in /root/.profile /root/.bashrc; do
    if [ -f "$rc_file" ] && ! grep -q '/usr/local/bin' "$rc_file"; then
      echo 'export PATH="/usr/local/bin:$PATH"' >>"$rc_file"
    fi
  done
}

download_with_progress() {
@@ -135,44 +135,19 @@ explain_exit_code() {
|
||||
# --- Generic / Shell ---
|
||||
1) echo "General error / Operation not permitted" ;;
|
||||
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
|
||||
10) echo "Docker / privileged mode required (unsupported environment)" ;;
|
||||
|
||||
# --- curl / wget errors (commonly seen in downloads) ---
|
||||
4) echo "curl: Feature not supported or protocol error" ;;
|
||||
5) echo "curl: Could not resolve proxy" ;;
|
||||
6) echo "curl: DNS resolution failed (could not resolve host)" ;;
|
||||
7) echo "curl: Failed to connect (network unreachable / host down)" ;;
|
||||
8) echo "curl: FTP server reply error" ;;
|
||||
22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
|
||||
23) echo "curl: Write error (disk full or permissions)" ;;
|
||||
25) echo "curl: Upload failed" ;;
|
||||
28) echo "curl: Operation timeout (network slow or server not responding)" ;;
|
||||
30) echo "curl: FTP port command failed" ;;
|
||||
35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
|
||||
56) echo "curl: Receive error (connection reset by peer)" ;;
|
||||
75) echo "Temporary failure (retry later)" ;;
|
||||
78) echo "curl: Remote file not found (404 on FTP/file)" ;;
|
||||
|
||||
# --- Package manager / APT / DPKG ---
|
||||
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
|
||||
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
|
||||
102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
|
||||
|
||||
# --- BSD sysexits.h (64-78) ---
|
||||
64) echo "Usage error (wrong arguments)" ;;
|
||||
65) echo "Data format error (bad input data)" ;;
|
||||
66) echo "Input file not found (cannot open input)" ;;
|
||||
67) echo "User not found (addressee unknown)" ;;
|
||||
68) echo "Host not found (hostname unknown)" ;;
|
||||
69) echo "Service unavailable" ;;
|
||||
70) echo "Internal software error" ;;
|
||||
71) echo "System error (OS-level failure)" ;;
|
||||
72) echo "Critical OS file missing" ;;
|
||||
73) echo "Cannot create output file" ;;
|
||||
74) echo "I/O error" ;;
|
||||
76) echo "Remote protocol error" ;;
|
||||
77) echo "Permission denied" ;;
|
||||
|
||||
# --- Common shell/system errors ---
|
||||
124) echo "Command timed out (timeout command)" ;;
|
||||
126) echo "Command invoked cannot execute (permission problem?)" ;;
|
||||
@@ -649,8 +624,6 @@ EOF
  curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" &>/dev/null || true

  POST_TO_API_DONE=true
}

# ------------------------------------------------------------------------------

@@ -5253,20 +5253,14 @@ ensure_log_on_host() {
# - Exit trap handler for reporting to API telemetry
# - Captures exit code and reports to PocketBase using centralized error descriptions
# - Uses explain_exit_code() from api.func for consistent error messages
# - For non-zero exit codes: posts "failed" status
# - For zero exit codes where post_update_to_api was never called:
#   catches orphaned "installing" records (e.g., script exited cleanly
#   but description() was never reached)
# - Posts failure status with exit code to API (error description resolved automatically)
# - Only executes on non-zero exit codes
# ------------------------------------------------------------------------------
api_exit_script() {
  local exit_code=$?
  exit_code=$?
  if [ $exit_code -ne 0 ]; then
    ensure_log_on_host
    post_update_to_api "failed" "$exit_code"
  elif [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    # Script exited with 0 but never sent a completion status
    # This catches edge cases like early returns after post_to_api()
    post_update_to_api "failed" "1"
  fi
}
@@ -37,34 +37,11 @@ if ! declare -f explain_exit_code &>/dev/null; then
  case "$code" in
    1) echo "General error / Operation not permitted" ;;
    2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
    10) echo "Docker / privileged mode required (unsupported environment)" ;;
    4) echo "curl: Feature not supported or protocol error" ;;
    5) echo "curl: Could not resolve proxy" ;;
    6) echo "curl: DNS resolution failed (could not resolve host)" ;;
    7) echo "curl: Failed to connect (network unreachable / host down)" ;;
    8) echo "curl: FTP server reply error" ;;
    22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
    23) echo "curl: Write error (disk full or permissions)" ;;
    25) echo "curl: Upload failed" ;;
    28) echo "curl: Operation timeout (network slow or server not responding)" ;;
    30) echo "curl: FTP port command failed" ;;
    35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
    56) echo "curl: Receive error (connection reset by peer)" ;;
    75) echo "Temporary failure (retry later)" ;;
    78) echo "curl: Remote file not found (404 on FTP/file)" ;;
    64) echo "Usage error (wrong arguments)" ;;
    65) echo "Data format error (bad input data)" ;;
    66) echo "Input file not found (cannot open input)" ;;
    67) echo "User not found (addressee unknown)" ;;
    68) echo "Host not found (hostname unknown)" ;;
    69) echo "Service unavailable" ;;
    70) echo "Internal software error" ;;
    71) echo "System error (OS-level failure)" ;;
    72) echo "Critical OS file missing" ;;
    73) echo "Cannot create output file" ;;
    74) echo "I/O error" ;;
    76) echo "Remote protocol error" ;;
    77) echo "Permission denied" ;;
    100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
    101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
    102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;

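# Usage sketch: the fallback above only takes effect when api.func did not already
# define explain_exit_code(); its exact signature is not shown in this hunk, but based
# on the case statement it maps a numeric code to a printable description, e.g.:
explain_exit_code 28   # -> curl: Operation timeout (network slow or server not responding)
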
@@ -1851,26 +1851,16 @@ function download_with_progress() {
# Ensures /usr/local/bin is permanently in system PATH.
#
# Description:
# - Adds to /etc/profile.d for login shells (SSH, noVNC)
# - Adds to /root/.bashrc for non-login shells (pct enter)
# ------------------------------------------------------------------------------

function ensure_usr_local_bin_persist() {
  # Skip on Proxmox host
  command -v pveversion &>/dev/null && return

  # Login shells: /etc/profile.d/
  local PROFILE_FILE="/etc/profile.d/custom_path.sh"
  if [[ ! -f "$PROFILE_FILE" ]]; then
    echo 'export PATH="/usr/local/bin:$PATH"' >"$PROFILE_FILE"
    chmod +x "$PROFILE_FILE"
  fi

  # Non-login shells (pct enter): /root/.bashrc
  local BASHRC="/root/.bashrc"
  if [[ -f "$BASHRC" ]] && ! grep -q '/usr/local/bin' "$BASHRC"; then
    echo 'export PATH="/usr/local/bin:$PATH"' >>"$BASHRC"
  fi
}

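# Verification sketch (container ID 100 is a placeholder): after the function has run
# inside a CT, /usr/local/bin should be on PATH for both shell types it targets.
pct exec 100 -- bash -lc 'echo "$PATH"'   # login shell, via /etc/profile.d/custom_path.sh
pct enter 100                             # interactive non-login shell, via /root/.bashrc
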
# ------------------------------------------------------------------------------

@@ -529,21 +529,9 @@ cleanup_vmid() {
}

cleanup() {
  local exit_code=$?
  if [[ "$(dirs -p | wc -l)" -gt 1 ]]; then
    popd >/dev/null || true
  fi
  # Report final telemetry status if post_to_api_vm was called but no update was sent
  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    if declare -f post_update_to_api >/dev/null 2>&1; then
      if [[ $exit_code -ne 0 ]]; then
        post_update_to_api "failed" "$exit_code"
      else
        # Exited cleanly but description()/success was never called — shouldn't happen
        post_update_to_api "failed" "1"
      fi
    fi
  fi
}

check_root() {
@@ -624,417 +612,3 @@ EOF
|
||||
qm set "$VMID" -description "$DESCRIPTION" >/dev/null
|
||||
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION: VM SMART RECOVERY
|
||||
# ==============================================================================
|
||||
|
||||
# Global error log for VM creation — captures stderr from critical commands
|
||||
VM_ERROR_LOG="${VM_ERROR_LOG:-/tmp/vm-install-$$.log}"
|
||||
VM_RECOVERY_ATTEMPT=${VM_RECOVERY_ATTEMPT:-0}
|
||||
VM_MAX_RETRIES=${VM_MAX_RETRIES:-2}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# vm_log_cmd()
|
||||
#
|
||||
# - Wraps a command to capture stderr into VM_ERROR_LOG
|
||||
# - Passes stdout through normally
|
||||
# - Returns the original exit code
|
||||
# Usage: vm_log_cmd qm importdisk "$VMID" "$IMAGE" "$STORAGE"
|
||||
# ------------------------------------------------------------------------------
|
||||
vm_log_cmd() {
|
||||
"$@" 2>>"$VM_ERROR_LOG"
|
||||
}
|
||||
|
||||
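# Usage sketch: wrap a critical step so its stderr lands in VM_ERROR_LOG, capture the
# exit code even under `set -e`, and hand both to the detectors/classifier defined
# below. $VMID, $IMAGE and $STORAGE are placeholders from the surrounding script.
rc=0
vm_log_cmd qm importdisk "$VMID" "$IMAGE" "$STORAGE" || rc=$?
if ((rc != 0)); then
  echo "importdisk failed (exit ${rc}, category: $(vm_classify_error "$rc"))"
fi
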
# ------------------------------------------------------------------------------
|
||||
# is_vm_download_error()
|
||||
#
|
||||
# - Detects download failures based on exit code and error log
|
||||
# - Checks curl exit codes (6, 7, 22, 28, 35, 52, 56) and HTTP error patterns
|
||||
# - Returns 0 (true) if download error detected, 1 otherwise
|
||||
# ------------------------------------------------------------------------------
|
||||
is_vm_download_error() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
# curl-specific exit codes indicating download issues
|
||||
case "$exit_code" in
|
||||
6 | 7 | 22 | 28 | 35 | 52 | 56) return 0 ;;
|
||||
esac
|
||||
|
||||
# Check log for download-related patterns
|
||||
if [[ -s "$log_file" ]]; then
|
||||
if grep -qiE "curl.*failed|download.*failed|HTTP.*[45][0-9]{2}|Could not resolve|Connection refused|Connection timed out|SSL.*error" "$log_file" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_vm_disk_import_error()
|
||||
#
|
||||
# - Detects disk import failures (qm importdisk / qm disk import)
|
||||
# - Checks for storage allocation and format conversion errors
|
||||
# - Returns 0 (true) if disk import error detected, 1 otherwise
|
||||
# ------------------------------------------------------------------------------
|
||||
is_vm_disk_import_error() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
if [[ -s "$log_file" ]]; then
|
||||
if grep -qiE "importdisk.*failed|disk import.*error|storage.*allocation.*failed|qcow2.*error|raw.*error|pvesm.*alloc.*failed|unable to create|volume.*already exists" "$log_file" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_vm_virt_customize_error()
|
||||
#
|
||||
# - Detects virt-customize / libguestfs failures
|
||||
# - Checks for guestfs, supermin, appliance boot errors
|
||||
# - Returns 0 (true) if virt-customize error detected, 1 otherwise
|
||||
# ------------------------------------------------------------------------------
|
||||
is_vm_virt_customize_error() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
if [[ -s "$log_file" ]]; then
|
||||
if grep -qiE "virt-customize|libguestfs|guestfs|supermin|appliance.*boot|virt-.*failed|launch.*failed" "$log_file" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_vm_vmid_conflict()
|
||||
#
|
||||
# - Detects VMID conflicts (VM already exists)
|
||||
# - Returns 0 (true) if conflict detected, 1 otherwise
|
||||
# ------------------------------------------------------------------------------
|
||||
is_vm_vmid_conflict() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
if [[ -s "$log_file" ]]; then
|
||||
if grep -qiE "already exists|VM $VMID already|unable to create VM|VMID.*in use" "$log_file" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_vm_storage_full()
|
||||
#
|
||||
# - Detects storage full / space exhaustion errors
|
||||
# - Returns 0 (true) if storage space issue detected, 1 otherwise
|
||||
# ------------------------------------------------------------------------------
|
||||
is_vm_storage_full() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
if [[ -s "$log_file" ]]; then
|
||||
if grep -qiE "not enough space|no space left|storage.*full|disk quota|ENOSPC|insufficient.*space|thin pool.*full" "$log_file" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_vm_network_error()
|
||||
#
|
||||
# - Detects general network/DNS errors beyond download failures
|
||||
# - Returns 0 (true) if network issue detected, 1 otherwise
|
||||
# ------------------------------------------------------------------------------
|
||||
is_vm_network_error() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
# Network-related curl/wget exit codes
|
||||
case "$exit_code" in
|
||||
6 | 7 | 28 | 52 | 56) return 0 ;;
|
||||
esac
|
||||
|
||||
if [[ -s "$log_file" ]]; then
|
||||
if grep -qiE "Name or service not known|Temporary failure in name resolution|Network is unreachable|No route to host|DNS.*failed|could not resolve" "$log_file" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# vm_classify_error()
|
||||
#
|
||||
# - Classifies a VM creation error into a category
|
||||
# - Order matters: most specific checks first
|
||||
# - Returns category string via stdout
|
||||
# - Categories: vmid_conflict, storage_full, download, disk_import,
|
||||
# virt_customize, network, unknown
|
||||
# ------------------------------------------------------------------------------
|
||||
vm_classify_error() {
|
||||
local exit_code="${1:-0}"
|
||||
local log_file="${2:-$VM_ERROR_LOG}"
|
||||
|
||||
if is_vm_vmid_conflict "$exit_code" "$log_file"; then
|
||||
echo "vmid_conflict"
|
||||
elif is_vm_storage_full "$exit_code" "$log_file"; then
|
||||
echo "storage_full"
|
||||
elif is_vm_download_error "$exit_code" "$log_file"; then
|
||||
echo "download"
|
||||
elif is_vm_disk_import_error "$exit_code" "$log_file"; then
|
||||
echo "disk_import"
|
||||
elif is_vm_virt_customize_error "$exit_code" "$log_file"; then
|
||||
echo "virt_customize"
|
||||
elif is_vm_network_error "$exit_code" "$log_file"; then
|
||||
echo "network"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# vm_show_recovery_menu()
|
||||
#
|
||||
# - Displays a whiptail menu with recovery options after a VM creation failure
|
||||
# - Options are dynamically built based on error category
|
||||
# - Returns the selected option via stdout
|
||||
# - Arguments:
|
||||
# $1: exit_code
|
||||
# $2: error_category (from vm_classify_error)
|
||||
# $3: current attempt number
|
||||
# ------------------------------------------------------------------------------
|
||||
vm_show_recovery_menu() {
|
||||
local exit_code="${1:-1}"
|
||||
local error_category="${2:-unknown}"
|
||||
local attempt="${3:-1}"
|
||||
|
||||
local menu_items=()
|
||||
local menu_height=12
|
||||
local item_count=0
|
||||
|
||||
# --- Dynamic options based on error category ---
|
||||
|
||||
# Retry (always available unless max retries reached)
|
||||
if ((attempt < VM_MAX_RETRIES)); then
|
||||
case "$error_category" in
|
||||
download)
|
||||
menu_items+=("RETRY_DOWNLOAD" "🔄 Retry download (clear cache & re-download)" "ON")
|
||||
((item_count++))
|
||||
;;
|
||||
disk_import)
|
||||
menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
|
||||
((item_count++))
|
||||
;;
|
||||
virt_customize)
|
||||
menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
|
||||
((item_count++))
|
||||
menu_items+=("SKIP_CUSTOMIZE" "⏭️ Skip virt-customize (use first-boot fallback)" "OFF")
|
||||
((item_count++))
|
||||
;;
|
||||
network)
|
||||
menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
|
||||
((item_count++))
|
||||
;;
|
||||
vmid_conflict)
|
||||
menu_items+=("NEW_VMID" "🆔 Choose a different VM ID" "ON")
|
||||
((item_count++))
|
||||
;;
|
||||
storage_full)
|
||||
menu_items+=("RETRY_SETTINGS" "⚙️ Retry with different settings (storage/disk)" "ON")
|
||||
((item_count++))
|
||||
;;
|
||||
*)
|
||||
menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
|
||||
((item_count++))
|
||||
;;
|
||||
esac
|
||||
|
||||
# Retry with different resources (always offered)
|
||||
menu_items+=("RETRY_SETTINGS" "⚙️ Retry with different settings (RAM/CPU/Disk)" "OFF")
|
||||
((item_count++))
|
||||
fi
|
||||
|
||||
# Keep VM for debugging (always available)
|
||||
menu_items+=("KEEP" "🔍 Keep partial VM for manual debugging" "OFF")
|
||||
((item_count++))
|
||||
|
||||
# Abort (always available)
|
||||
menu_items+=("ABORT" "❌ Destroy VM and exit" "OFF")
|
||||
((item_count++))
|
||||
|
||||
menu_height=$((item_count + 10))
|
||||
|
||||
# Error info for title
|
||||
local title="VM CREATION FAILED"
|
||||
local body="Exit code: ${exit_code} | Category: ${error_category}\nAttempt: ${attempt}/${VM_MAX_RETRIES}\n\nChoose a recovery action:"
|
||||
|
||||
if ((attempt >= VM_MAX_RETRIES)); then
|
||||
body="Exit code: ${exit_code} | Category: ${error_category}\n⚠️ Maximum retries (${VM_MAX_RETRIES}) reached.\n\nChoose an action:"
|
||||
fi
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "$title" \
|
||||
--radiolist "$body" "$menu_height" 72 "$item_count" \
|
||||
"${menu_items[@]}" 3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
echo "$choice"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# vm_handle_recovery()
|
||||
#
|
||||
# - Main recovery handler called from error_handler or a wrapper
|
||||
# - Classifies the error, shows recovery menu, and executes the chosen action
|
||||
# - Arguments:
|
||||
# $1: exit_code
|
||||
# $2: line_number
|
||||
# $3: failed_command
|
||||
# $4: cleanup_fn — function to call for VM cleanup (default: cleanup_vmid)
|
||||
# $5: retry_fn — function to re-invoke for full retry (required for retry)
|
||||
# - Uses global: VM_ERROR_LOG, VM_RECOVERY_ATTEMPT, VM_MAX_RETRIES, VMID
|
||||
# - Returns: 0 if retry was chosen (caller should re-run), 1 if abort/keep
|
||||
# ------------------------------------------------------------------------------
|
||||
vm_handle_recovery() {
|
||||
local exit_code="${1:-1}"
|
||||
local line_number="${2:-?}"
|
||||
local failed_command="${3:-unknown}"
|
||||
local cleanup_fn="${4:-cleanup_vmid}"
|
||||
local retry_fn="${5:-}"
|
||||
|
||||
# Stop any running spinner
|
||||
stop_spinner 2>/dev/null || true
|
||||
|
||||
# Classify the error
|
||||
local error_category
|
||||
error_category=$(vm_classify_error "$exit_code" "$VM_ERROR_LOG")
|
||||
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
|
||||
# Show error details
|
||||
echo ""
|
||||
msg_error "VM creation failed in line ${line_number}"
|
||||
msg_error "Exit code: ${exit_code} | Category: ${error_category}"
|
||||
msg_error "Command: ${failed_command}"
|
||||
|
||||
# Show last few lines of error log if available
|
||||
if [[ -s "$VM_ERROR_LOG" ]]; then
|
||||
echo -e "\n${TAB}${YW}--- Last 5 lines of error log ---${CL}"
|
||||
tail -n 5 "$VM_ERROR_LOG" 2>/dev/null | while IFS= read -r line; do
|
||||
echo -e "${TAB} ${line}"
|
||||
done
|
||||
echo -e "${TAB}${YW}----------------------------------${CL}\n"
|
||||
fi
|
||||
|
||||
# Show recovery menu
|
||||
local choice
|
||||
choice=$(vm_show_recovery_menu "$exit_code" "$error_category" "$VM_RECOVERY_ATTEMPT")
|
||||
|
||||
case "$choice" in
|
||||
RETRY | RETRY_DOWNLOAD)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
"$cleanup_fn" 2>/dev/null || true
|
||||
rm -f "$VM_ERROR_LOG"
|
||||
rm -f "${WORK_FILE:-}" 2>/dev/null
|
||||
|
||||
if [[ "$choice" == "RETRY_DOWNLOAD" ]]; then
|
||||
# Clear cached image
|
||||
if [[ -n "${CACHE_FILE:-}" && -f "$CACHE_FILE" ]]; then
|
||||
msg_info "Clearing cached image: $(basename "$CACHE_FILE")"
|
||||
rm -f "$CACHE_FILE"
|
||||
msg_ok "Cache cleared"
|
||||
fi
|
||||
fi
|
||||
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
|
||||
if [[ -n "$retry_fn" ]]; then
|
||||
# Re-invoke the retry function — caller loop handles this
|
||||
return 0
|
||||
else
|
||||
msg_warn "No retry function provided — please re-run the script manually"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
|
||||
SKIP_CUSTOMIZE)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry (skipping virt-customize)"
|
||||
"$cleanup_fn" 2>/dev/null || true
|
||||
rm -f "$VM_ERROR_LOG"
|
||||
rm -f "${WORK_FILE:-}" 2>/dev/null
|
||||
# Set flag so docker-vm.sh skips virt-customize
|
||||
export SKIP_VIRT_CUSTOMIZE="yes"
|
||||
msg_ok "Will use first-boot fallback for package installation"
|
||||
|
||||
if [[ -n "$retry_fn" ]]; then
|
||||
return 0
|
||||
else
|
||||
msg_warn "No retry function provided — please re-run the script manually"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
|
||||
RETRY_SETTINGS)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry with new settings"
|
||||
"$cleanup_fn" 2>/dev/null || true
|
||||
rm -f "$VM_ERROR_LOG"
|
||||
rm -f "${WORK_FILE:-}" 2>/dev/null
|
||||
|
||||
# Let user choose new settings via advanced_settings if available
|
||||
if declare -f advanced_settings >/dev/null 2>&1; then
|
||||
header_info 2>/dev/null || true
|
||||
echo -e "${ADVANCED:-}${BOLD}${RD}Reconfigure VM Settings${CL}"
|
||||
advanced_settings
|
||||
else
|
||||
msg_warn "advanced_settings() not available — using current settings"
|
||||
fi
|
||||
|
||||
if [[ -n "$retry_fn" ]]; then
|
||||
return 0
|
||||
else
|
||||
msg_warn "No retry function provided — please re-run the script manually"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
|
||||
NEW_VMID)
|
||||
msg_info "Cleaning up conflicting VM ${VMID}"
|
||||
"$cleanup_fn" 2>/dev/null || true
|
||||
rm -f "$VM_ERROR_LOG"
|
||||
rm -f "${WORK_FILE:-}" 2>/dev/null
|
||||
VMID=$(get_valid_nextid)
|
||||
echo -e "${CONTAINERID:-}${BOLD}${DGN}New Virtual Machine ID: ${BGN}${VMID}${CL}"
|
||||
msg_ok "Using new VMID: ${VMID}"
|
||||
|
||||
if [[ -n "$retry_fn" ]]; then
|
||||
return 0
|
||||
else
|
||||
msg_warn "No retry function provided — please re-run the script manually"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
|
||||
KEEP)
|
||||
msg_warn "Keeping partial VM ${VMID} for manual debugging"
|
||||
msg_warn "You can inspect it with: qm config ${VMID}"
|
||||
msg_warn "To remove it later: qm destroy ${VMID} --destroy-unreferenced-disks --purge"
|
||||
# Report failure to telemetry
|
||||
post_update_to_api "failed" "$exit_code" 2>/dev/null || true
|
||||
exit "$exit_code"
|
||||
;;
|
||||
|
||||
ABORT | *)
|
||||
msg_info "Destroying failed VM ${VMID}"
|
||||
"$cleanup_fn" 2>/dev/null || true
|
||||
rm -f "$VM_ERROR_LOG"
|
||||
post_update_to_api "failed" "$exit_code" 2>/dev/null || true
|
||||
msg_error "VM creation aborted by user"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
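# Caller-side sketch: vm_handle_recovery() only signals "retry" through its return
# code, so the calling script needs a loop or an ERR handler that re-invokes the
# creation function (the VM scripts further down do the latter). create_vm and
# cleanup_vmid are assumed to exist in the caller.
while true; do
  if create_vm; then
    break
  elif ! vm_handle_recovery "$?" "$LINENO" "create_vm" "cleanup_vmid" "create_vm"; then
    break   # abort/keep already exited inside the handler; this is only a safety net
  fi
done
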
@@ -100,15 +100,8 @@ function cleanup_vmid() {
}

function cleanup() {
  local exit_code=$?
  popd >/dev/null
  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    if [[ $exit_code -eq 0 ]]; then
      post_update_to_api "done" "none"
    else
      post_update_to_api "failed" "$exit_code"
    fi
  fi
  rm -rf $TEMP_DIR
}
|
||||
|
||||
@@ -65,63 +65,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 4 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"SKIP_CUSTOMIZE" "Retry and skip image customization" "OFF" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY | SKIP_CUSTOMIZE)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
rm -f "${WORK_FILE:-}" 2>/dev/null
|
||||
[[ "$choice" == "SKIP_CUSTOMIZE" ]] && export SKIP_VIRT_CUSTOMIZE="yes"
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "${exit_code}"
|
||||
echo -e "\n$error_message\n"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -150,15 +100,8 @@ function cleanup_vmid() {
}

function cleanup() {
  local exit_code=$?
  popd >/dev/null
  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    if [[ $exit_code -eq 0 ]]; then
      post_update_to_api "done" "none"
    else
      post_update_to_api "failed" "$exit_code"
    fi
  fi
  rm -rf $TEMP_DIR
}
|
||||
@@ -535,130 +478,125 @@ fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
|
||||
create_vm() {
|
||||
# ==============================================================================
|
||||
# PREREQUISITES
|
||||
# ==============================================================================
|
||||
if ! command -v virt-customize &>/dev/null; then
|
||||
msg_info "Installing libguestfs-tools"
|
||||
apt-get update >/dev/null 2>&1
|
||||
apt-get install -y libguestfs-tools >/dev/null 2>&1
|
||||
msg_ok "Installed libguestfs-tools"
|
||||
fi
|
||||
# ==============================================================================
|
||||
# PREREQUISITES
|
||||
# ==============================================================================
|
||||
if ! command -v virt-customize &>/dev/null; then
|
||||
msg_info "Installing libguestfs-tools"
|
||||
apt-get update >/dev/null 2>&1
|
||||
apt-get install -y libguestfs-tools >/dev/null 2>&1
|
||||
msg_ok "Installed libguestfs-tools"
|
||||
fi
|
||||
|
||||
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
|
||||
else
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-nocloud-amd64.qcow2
|
||||
fi
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
|
||||
else
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-nocloud-amd64.qcow2
|
||||
fi
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
|
||||
# ==============================================================================
|
||||
# IMAGE CUSTOMIZATION
|
||||
# ==============================================================================
|
||||
WORK_FILE=$(mktemp --suffix=.qcow2)
|
||||
cp "$FILE" "$WORK_FILE"
|
||||
# ==============================================================================
|
||||
# IMAGE CUSTOMIZATION
|
||||
# ==============================================================================
|
||||
msg_info "Customizing ${FILE} image"
|
||||
|
||||
if [[ "${SKIP_VIRT_CUSTOMIZE:-}" != "yes" ]]; then
|
||||
msg_info "Customizing ${FILE} image"
|
||||
WORK_FILE=$(mktemp --suffix=.qcow2)
|
||||
cp "$FILE" "$WORK_FILE"
|
||||
|
||||
# Set hostname
|
||||
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
|
||||
# Set hostname
|
||||
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
|
||||
|
||||
# Prepare for unique machine-id on first boot
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
|
||||
# Prepare for unique machine-id on first boot
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
|
||||
|
||||
# Disable systemd-firstboot to prevent interactive prompts blocking the console
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true
|
||||
# Disable systemd-firstboot to prevent interactive prompts blocking the console
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true
|
||||
|
||||
# Pre-seed firstboot settings so it won't prompt even if triggered
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true
|
||||
# Pre-seed firstboot settings so it won't prompt even if triggered
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true
|
||||
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
# Cloud-Init handles SSH and login
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
else
|
||||
# Configure auto-login on serial console (ttyS0) and virtual console (tty1)
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
# Cloud-Init handles SSH and login
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
else
|
||||
# Configure auto-login on serial console (ttyS0) and virtual console (tty1)
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
|
||||
EOF' >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
|
||||
EOF' >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
|
||||
msg_ok "Customized image"
|
||||
else
|
||||
msg_ok "Skipped image customization (hostname and login not pre-configured)"
|
||||
fi
|
||||
msg_ok "Customized image"
|
||||
|
||||
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
|
||||
eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
|
||||
done
|
||||
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
|
||||
eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
|
||||
done
|
||||
|
||||
msg_info "Creating a Debian 13 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-scsi1 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
else
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
fi
|
||||
msg_info "Creating a Debian 13 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-scsi1 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
else
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
fi
|
||||
|
||||
# Clean up work file
|
||||
rm -f "$WORK_FILE"
|
||||
# Clean up work file
|
||||
rm -f "$WORK_FILE"
|
||||
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -686,27 +624,22 @@ EOF' >/dev/null 2>&1 || true
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
|
||||
msg_ok "Created a Debian 13 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Debian 13 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Debian 13 VM"
|
||||
fi
|
||||
msg_ok "Created a Debian 13 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Debian 13 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Debian 13 VM"
|
||||
fi
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo "More Info at https://github.com/community-scripts/ProxmoxVE/discussions/836"
|
||||
} # end create_vm
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo "More Info at https://github.com/community-scripts/ProxmoxVE/discussions/836"
|
||||
|
||||
@@ -100,15 +100,8 @@ function cleanup_vmid() {
}

function cleanup() {
  local exit_code=$?
  popd >/dev/null
  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    if [[ $exit_code -eq 0 ]]; then
      post_update_to_api "done" "none"
    else
      post_update_to_api "failed" "$exit_code"
    fi
  fi
  rm -rf $TEMP_DIR
}
|
||||
|
||||
448
vm/docker-vm.sh
@@ -40,32 +40,10 @@ trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Flag to control whether recovery menu is shown (set during create_vm)
|
||||
VM_CREATION_PHASE="no"
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
|
||||
# During VM creation phase: use smart recovery if available
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" ]] && declare -f vm_handle_recovery >/dev/null 2>&1; then
|
||||
# Temporarily disable ERR trap + set -e to prevent recursion during recovery menu
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
if vm_handle_recovery "$exit_code" "$line_number" "$command" "cleanup_vmid" "create_vm"; then
|
||||
# Recovery chose retry — re-invoke create_vm with traps restored
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
fi
|
||||
# Recovery chose abort/keep — vm_handle_recovery already called exit
|
||||
exit "$exit_code"
|
||||
fi
|
||||
|
||||
# Default error handling (outside VM creation phase)
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
post_update_to_api "failed" "${exit_code}"
|
||||
echo -e "\n$error_message\n"
|
||||
@@ -459,87 +437,74 @@ if ! command -v virt-customize &>/dev/null; then
|
||||
fi
|
||||
|
||||
# ==============================================================================
|
||||
# VM CREATION FUNCTION (wrapped for smart recovery retry)
|
||||
# IMAGE DOWNLOAD
|
||||
# ==============================================================================
|
||||
create_vm() {
|
||||
msg_info "Retrieving the URL for the ${OS_DISPLAY} Qcow2 Disk Image"
|
||||
URL=$(get_image_url)
|
||||
CACHE_DIR="/var/lib/vz/template/cache"
|
||||
CACHE_FILE="$CACHE_DIR/$(basename "$URL")"
|
||||
mkdir -p "$CACHE_DIR"
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
|
||||
# Reset error log for this attempt
|
||||
VM_ERROR_LOG="/tmp/vm-install-${VMID}.log"
|
||||
: >"$VM_ERROR_LOG"
|
||||
if [[ ! -s "$CACHE_FILE" ]]; then
|
||||
curl -f#SL -o "$CACHE_FILE" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
msg_ok "Downloaded ${CL}${BL}$(basename "$CACHE_FILE")${CL}"
|
||||
else
|
||||
msg_ok "Using cached image ${CL}${BL}$(basename "$CACHE_FILE")${CL}"
|
||||
fi
|
||||
|
||||
# ==============================================================================
|
||||
# IMAGE DOWNLOAD
|
||||
# ==============================================================================
|
||||
msg_info "Retrieving the URL for the ${OS_DISPLAY} Qcow2 Disk Image"
|
||||
URL=$(get_image_url)
|
||||
CACHE_DIR="/var/lib/vz/template/cache"
|
||||
CACHE_FILE="$CACHE_DIR/$(basename "$URL")"
|
||||
mkdir -p "$CACHE_DIR"
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
# ==============================================================================
|
||||
# STORAGE TYPE DETECTION
|
||||
# ==============================================================================
|
||||
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="--format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="--format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="--format raw"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ ! -s "$CACHE_FILE" ]]; then
|
||||
curl -f#SL -o "$CACHE_FILE" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
msg_ok "Downloaded ${CL}${BL}$(basename "$CACHE_FILE")${CL}"
|
||||
else
|
||||
msg_ok "Using cached image ${CL}${BL}$(basename "$CACHE_FILE")${CL}"
|
||||
fi
|
||||
# ==============================================================================
|
||||
# IMAGE CUSTOMIZATION WITH DOCKER
|
||||
# ==============================================================================
|
||||
msg_info "Preparing ${OS_DISPLAY} image with Docker"
|
||||
|
||||
# ==============================================================================
|
||||
# STORAGE TYPE DETECTION
|
||||
# ==============================================================================
|
||||
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="--format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="--format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="--format raw"
|
||||
;;
|
||||
esac
|
||||
WORK_FILE=$(mktemp --suffix=.qcow2)
|
||||
cp "$CACHE_FILE" "$WORK_FILE"
|
||||
|
||||
# ==============================================================================
|
||||
# IMAGE CUSTOMIZATION WITH DOCKER
|
||||
# ==============================================================================
|
||||
msg_info "Preparing ${OS_DISPLAY} image with Docker"
|
||||
export LIBGUESTFS_BACKEND_SETTINGS=dns=8.8.8.8,1.1.1.1
|
||||
|
||||
WORK_FILE=$(mktemp --suffix=.qcow2)
|
||||
cp "$CACHE_FILE" "$WORK_FILE"
|
||||
DOCKER_PREINSTALLED="no"
|
||||
|
||||
export LIBGUESTFS_BACKEND_SETTINGS=dns=8.8.8.8,1.1.1.1
|
||||
# Install qemu-guest-agent and Docker during image customization
|
||||
msg_info "Installing base packages in image"
|
||||
if virt-customize -a "$WORK_FILE" --install qemu-guest-agent,curl,ca-certificates >/dev/null 2>&1; then
|
||||
msg_ok "Installed base packages"
|
||||
|
||||
DOCKER_PREINSTALLED="no"
|
||||
msg_info "Installing Docker (this may take 2-5 minutes)"
|
||||
if virt-customize -q -a "$WORK_FILE" --run-command "curl -fsSL https://get.docker.com | sh" >/dev/null 2>&1 &&
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "systemctl enable docker" >/dev/null 2>&1; then
|
||||
msg_ok "Installed Docker"
|
||||
|
||||
# Install qemu-guest-agent and Docker during image customization
|
||||
# Skip if recovery set SKIP_VIRT_CUSTOMIZE (virt-customize failed before)
|
||||
if [[ "${SKIP_VIRT_CUSTOMIZE:-}" == "yes" ]]; then
|
||||
msg_ok "Skipping virt-customize (using first-boot fallback)"
|
||||
else
|
||||
msg_info "Installing base packages in image"
|
||||
if virt-customize -a "$WORK_FILE" --install qemu-guest-agent,curl,ca-certificates 2>>"$VM_ERROR_LOG" >/dev/null; then
|
||||
msg_ok "Installed base packages"
|
||||
|
||||
msg_info "Installing Docker (this may take 2-5 minutes)"
|
||||
if virt-customize -q -a "$WORK_FILE" --run-command "curl -fsSL https://get.docker.com | sh" >/dev/null 2>&1 &&
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "systemctl enable docker" >/dev/null 2>&1; then
|
||||
msg_ok "Installed Docker"
|
||||
|
||||
msg_info "Configuring Docker daemon"
|
||||
# Optimize Docker daemon configuration
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/docker" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/docker/daemon.json << EOF
|
||||
msg_info "Configuring Docker daemon"
|
||||
# Optimize Docker daemon configuration
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/docker" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/docker/daemon.json << EOF
|
||||
{
|
||||
"storage-driver": "overlay2",
|
||||
"log-driver": "json-file",
|
||||
@@ -549,46 +514,45 @@ create_vm() {
|
||||
}
|
||||
}
|
||||
EOF' >/dev/null 2>&1
|
||||
DOCKER_PREINSTALLED="yes"
|
||||
msg_ok "Configured Docker daemon"
|
||||
else
|
||||
msg_ok "Docker will be installed on first boot"
|
||||
fi
|
||||
else
|
||||
msg_ok "Packages will be installed on first boot"
|
||||
fi
|
||||
fi
|
||||
|
||||
msg_info "Finalizing image (hostname, SSH config)"
|
||||
# Set hostname and prepare for unique machine-id
|
||||
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
|
||||
|
||||
# Configure SSH for Cloud-Init
|
||||
if [ "$USE_CLOUD_INIT" = "yes" ]; then
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
DOCKER_PREINSTALLED="yes"
|
||||
msg_ok "Configured Docker daemon"
|
||||
else
|
||||
# Configure auto-login for nocloud images (no Cloud-Init)
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
|
||||
EOF' >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
|
||||
EOF' >/dev/null 2>&1 || true
|
||||
msg_ok "Docker will be installed on first boot"
|
||||
fi
|
||||
msg_ok "Finalized image"
|
||||
else
|
||||
msg_ok "Packages will be installed on first boot"
|
||||
fi
|
||||
|
||||
# Create first-boot Docker install script (fallback if virt-customize failed)
|
||||
if [ "$DOCKER_PREINSTALLED" = "no" ]; then
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /root/install-docker.sh << "DOCKERSCRIPT"
|
||||
msg_info "Finalizing image (hostname, SSH config)"
|
||||
# Set hostname and prepare for unique machine-id
|
||||
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
|
||||
|
||||
# Configure SSH for Cloud-Init
|
||||
if [ "$USE_CLOUD_INIT" = "yes" ]; then
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
|
||||
else
|
||||
# Configure auto-login for nocloud images (no Cloud-Init)
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
|
||||
EOF' >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
|
||||
EOF' >/dev/null 2>&1 || true
|
||||
fi
|
||||
msg_ok "Finalized image"
|
||||
|
||||
# Create first-boot Docker install script (fallback if virt-customize failed)
|
||||
if [ "$DOCKER_PREINSTALLED" = "no" ]; then
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /root/install-docker.sh << "DOCKERSCRIPT"
|
||||
#!/bin/bash
|
||||
exec > /var/log/install-docker.log 2>&1
|
||||
echo "[$(date)] Starting Docker installation"
|
||||
@@ -619,7 +583,7 @@ echo "[$(date)] Docker installation completed"
|
||||
DOCKERSCRIPT
|
||||
chmod +x /root/install-docker.sh' >/dev/null 2>&1
|
||||
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/install-docker.service << "DOCKERSERVICE"
|
||||
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/install-docker.service << "DOCKERSERVICE"
|
||||
[Unit]
|
||||
Description=Install Docker on First Boot
|
||||
After=network-online.target
|
||||
@@ -635,123 +599,113 @@ RemainAfterExit=yes
|
||||
WantedBy=multi-user.target
|
||||
DOCKERSERVICE
|
||||
systemctl enable install-docker.service' >/dev/null 2>&1
|
||||
fi
|
||||
fi
|
||||
|
||||
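# Post-boot check sketch (run inside the guest, not in this host-side script): confirms
# whether Docker was baked into the image or is still being installed by the first-boot
# unit created above, e.g.
#   docker --version || systemctl status install-docker.service --no-pager
#   cat /var/log/install-docker.log
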
# Resize disk to target size
|
||||
msg_info "Resizing disk image to ${DISK_SIZE}"
|
||||
qemu-img resize "$WORK_FILE" "${DISK_SIZE}" >/dev/null 2>&1
|
||||
msg_ok "Resized disk image"
|
||||
|
||||
# ==============================================================================
|
||||
# VM CREATION
|
||||
# ==============================================================================
|
||||
msg_info "Creating Docker VM shell"
|
||||
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci 2>>"$VM_ERROR_LOG" >/dev/null
|
||||
|
||||
msg_ok "Created VM shell"
|
||||
|
||||
# ==============================================================================
|
||||
# DISK IMPORT
|
||||
# ==============================================================================
|
||||
msg_info "Importing disk into storage ($STORAGE)"
|
||||
|
||||
if qm disk import --help >/dev/null 2>&1; then
|
||||
IMPORT_CMD=(qm disk import)
|
||||
else
|
||||
IMPORT_CMD=(qm importdisk)
|
||||
fi
|
||||
|
||||
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$WORK_FILE" "$STORAGE" ${DISK_IMPORT:-} 2> >(tee -a "$VM_ERROR_LOG") || true)"
|
||||
DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
|
||||
[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
|
||||
[[ -z "$DISK_REF_IMPORTED" ]] && {
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
}
|
||||
|
||||
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
|
||||
|
||||
# Clean up work file
|
||||
rm -f "$WORK_FILE"
|
||||
|
||||
# ==============================================================================
|
||||
# VM CONFIGURATION
|
||||
# ==============================================================================
|
||||
msg_info "Attaching EFI and root disk"
|
||||
|
||||
qm set "$VMID" \
|
||||
--efidisk0 "${STORAGE}:0,efitype=4m" \
|
||||
--scsi0 "${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,}" \
|
||||
--boot order=scsi0 \
|
||||
--serial0 socket >/dev/null
|
||||
|
||||
qm set $VMID --agent enabled=1 >/dev/null
|
||||
|
||||
msg_ok "Attached EFI and root disk"
|
||||
|
||||
# Set VM description
|
||||
set_description
|
||||
|
||||
# Cloud-Init configuration
|
||||
if [ "$USE_CLOUD_INIT" = "yes" ]; then
|
||||
msg_info "Configuring Cloud-Init"
|
||||
setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes"
|
||||
msg_ok "Cloud-Init configured"
|
||||
fi
|
||||
|
||||
# Start VM
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Docker VM"
|
||||
qm start $VMID >/dev/null 2>&1
|
||||
msg_ok "Started Docker VM"
|
||||
fi
|
||||
|
||||
# ==============================================================================
|
||||
# FINAL OUTPUT
|
||||
# ==============================================================================
|
||||
VM_IP=""
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
set +e
|
||||
for i in {1..10}; do
|
||||
VM_IP=$(qm guest cmd "$VMID" network-get-interfaces 2>/dev/null |
|
||||
jq -r '.[] | select(.name != "lo") | ."ip-addresses"[]? | select(."ip-address-type" == "ipv4") | ."ip-address"' 2>/dev/null |
|
||||
grep -v "^127\." | head -1) || true
|
||||
[ -n "$VM_IP" ] && break
|
||||
sleep 3
|
||||
done
|
||||
set -e
|
||||
fi
|
||||
|
||||
echo -e "\n${INFO}${BOLD}${GN}Docker VM Configuration Summary:${CL}"
|
||||
echo -e "${TAB}${DGN}VM ID: ${BGN}${VMID}${CL}"
|
||||
echo -e "${TAB}${DGN}Hostname: ${BGN}${HN}${CL}"
|
||||
echo -e "${TAB}${DGN}OS: ${BGN}${OS_DISPLAY}${CL}"
|
||||
[ -n "$VM_IP" ] && echo -e "${TAB}${DGN}IP Address: ${BGN}${VM_IP}${CL}"
|
||||
|
||||
if [ "$DOCKER_PREINSTALLED" = "yes" ]; then
|
||||
echo -e "${TAB}${DGN}Docker: ${BGN}Pre-installed (via get.docker.com)${CL}"
|
||||
else
|
||||
echo -e "${TAB}${DGN}Docker: ${BGN}Installing on first boot${CL}"
|
||||
echo -e "${TAB}${YW}⚠️ Wait 2-3 minutes for installation to complete${CL}"
|
||||
echo -e "${TAB}${YW}⚠️ Check progress: ${BL}cat /var/log/install-docker.log${CL}"
|
||||
fi
|
||||
|
||||
if [ "$USE_CLOUD_INIT" = "yes" ]; then
|
||||
display_cloud_init_info "$VMID" "$HN" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
|
||||
} # end of create_vm()
|
||||
# Resize disk to target size
|
||||
msg_info "Resizing disk image to ${DISK_SIZE}"
|
||||
qemu-img resize "$WORK_FILE" "${DISK_SIZE}" >/dev/null 2>&1
|
||||
msg_ok "Resized disk image"
|
||||
|
||||
# ==============================================================================
|
||||
# VM CREATION WITH SMART RECOVERY
|
||||
# VM CREATION
|
||||
# ==============================================================================
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
rm -f "$VM_ERROR_LOG" 2>/dev/null || true
|
||||
msg_info "Creating Docker VM shell"
|
||||
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci >/dev/null
|
||||
|
||||
msg_ok "Created VM shell"
|
||||
|
||||
# ==============================================================================
|
||||
# DISK IMPORT
|
||||
# ==============================================================================
|
||||
msg_info "Importing disk into storage ($STORAGE)"
|
||||
|
||||
if qm disk import --help >/dev/null 2>&1; then
|
||||
IMPORT_CMD=(qm disk import)
|
||||
else
|
||||
IMPORT_CMD=(qm importdisk)
|
||||
fi
|
||||
|
||||
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$WORK_FILE" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
|
||||
DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
|
||||
[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
|
||||
[[ -z "$DISK_REF_IMPORTED" ]] && {
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
}
|
||||
|
||||
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
|
||||
|
||||
# Clean up work file
|
||||
rm -f "$WORK_FILE"
|
||||
|
||||
# ==============================================================================
|
||||
# VM CONFIGURATION
|
||||
# ==============================================================================
|
||||
msg_info "Attaching EFI and root disk"
|
||||
|
||||
qm set "$VMID" \
|
||||
--efidisk0 "${STORAGE}:0,efitype=4m" \
|
||||
--scsi0 "${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,}" \
|
||||
--boot order=scsi0 \
|
||||
--serial0 socket >/dev/null
|
||||
|
||||
qm set $VMID --agent enabled=1 >/dev/null
|
||||
|
||||
msg_ok "Attached EFI and root disk"
|
||||
|
||||
# Set VM description
|
||||
set_description
|
||||
|
||||
# Cloud-Init configuration
|
||||
if [ "$USE_CLOUD_INIT" = "yes" ]; then
|
||||
msg_info "Configuring Cloud-Init"
|
||||
setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes"
|
||||
msg_ok "Cloud-Init configured"
|
||||
fi
|
||||
|
||||
# Start VM
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Docker VM"
|
||||
qm start $VMID >/dev/null 2>&1
|
||||
msg_ok "Started Docker VM"
|
||||
fi
|
||||
|
||||
# ==============================================================================
|
||||
# FINAL OUTPUT
|
||||
# ==============================================================================
|
||||
VM_IP=""
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
set +e
|
||||
for i in {1..10}; do
|
||||
VM_IP=$(qm guest cmd "$VMID" network-get-interfaces 2>/dev/null |
|
||||
jq -r '.[] | select(.name != "lo") | ."ip-addresses"[]? | select(."ip-address-type" == "ipv4") | ."ip-address"' 2>/dev/null |
|
||||
grep -v "^127\." | head -1) || true
|
||||
[ -n "$VM_IP" ] && break
|
||||
sleep 3
|
||||
done
|
||||
set -e
|
||||
fi
|
||||
|
||||
echo -e "\n${INFO}${BOLD}${GN}Docker VM Configuration Summary:${CL}"
|
||||
echo -e "${TAB}${DGN}VM ID: ${BGN}${VMID}${CL}"
|
||||
echo -e "${TAB}${DGN}Hostname: ${BGN}${HN}${CL}"
|
||||
echo -e "${TAB}${DGN}OS: ${BGN}${OS_DISPLAY}${CL}"
|
||||
[ -n "$VM_IP" ] && echo -e "${TAB}${DGN}IP Address: ${BGN}${VM_IP}${CL}"
|
||||
|
||||
if [ "$DOCKER_PREINSTALLED" = "yes" ]; then
|
||||
echo -e "${TAB}${DGN}Docker: ${BGN}Pre-installed (via get.docker.com)${CL}"
|
||||
else
|
||||
echo -e "${TAB}${DGN}Docker: ${BGN}Installing on first boot${CL}"
|
||||
echo -e "${TAB}${YW}⚠️ Wait 2-3 minutes for installation to complete${CL}"
|
||||
echo -e "${TAB}${YW}⚠️ Check progress: ${BL}cat /var/log/install-docker.log${CL}"
|
||||
fi
|
||||
|
||||
if [ "$USE_CLOUD_INIT" = "yes" ]; then
|
||||
display_cloud_init_info "$VMID" "$HN" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
|
||||
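The Docker VM hunks above probe for the newer qm disk import subcommand and fall back to qm importdisk on older Proxmox VE releases, then recover the imported volume ID from the command output, or from pvesm list when the output cannot be parsed. A minimal sketch of that pattern, assuming the stock qm and pvesm CLIs; the helper name import_vm_disk is illustrative and not part of the repository:

# Illustrative helper: pick whichever import subcommand this release ships,
# import the image, and print the resulting disk reference.
import_vm_disk() {
  local vmid="$1" image="$2" storage="$3"
  local import_cmd out ref

  if qm disk import --help >/dev/null 2>&1; then
    import_cmd=(qm disk import)   # newer Proxmox VE releases
  else
    import_cmd=(qm importdisk)    # older releases
  fi

  out="$("${import_cmd[@]}" "$vmid" "$image" "$storage" 2>&1 || true)"

  # Prefer the reference printed by qm itself ...
  ref="$(printf '%s\n' "$out" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p")"
  # ... and fall back to the newest vm-<id>-disk-* volume on the storage.
  [[ -z "$ref" ]] && ref="$(pvesm list "$storage" | awk -v id="$vmid" '$1 ~ ("vm-"id"-disk-") {print $1}' | sort | tail -n1)"

  [[ -n "$ref" ]] || return 1
  printf '%s\n' "$ref"
}

Called as import_vm_disk "$VMID" "$WORK_FILE" "$STORAGE", it would print a reference such as local-lvm:vm-100-disk-0 that can be passed straight to qm set --scsi0.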
vm/haos-vm.sh (200 changes)
@@ -69,65 +69,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 4 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"RETRY_DOWNLOAD" "Retry with fresh download (clear cache)" "OFF" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY | RETRY_DOWNLOAD)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
if [[ "$choice" == "RETRY_DOWNLOAD" && -n "${CACHE_FILE:-}" ]]; then
|
||||
rm -f "$CACHE_FILE"
|
||||
msg_ok "Cleared cached image"
|
||||
fi
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "${exit_code}"
|
||||
echo -e "\n$error_message\n"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -156,16 +104,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
# Only send telemetry if post_to_api_vm was called (installing status was sent)
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
@@ -606,65 +546,64 @@ fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
|
||||
create_vm() {
|
||||
var_version="${BRANCH}"
|
||||
msg_info "Retrieving the URL for Home Assistant ${BRANCH} Disk Image"
|
||||
if [ "$BRANCH" == "$dev" ]; then
|
||||
URL="https://os-artifacts.home-assistant.io/${BRANCH}/haos_ova-${BRANCH}.qcow2.xz"
|
||||
else
|
||||
URL="https://github.com/home-assistant/operating-system/releases/download/${BRANCH}/haos_ova-${BRANCH}.qcow2.xz"
|
||||
fi
|
||||
var_version="${BRANCH}"
|
||||
msg_info "Retrieving the URL for Home Assistant ${BRANCH} Disk Image"
|
||||
if [ "$BRANCH" == "$dev" ]; then
|
||||
URL="https://os-artifacts.home-assistant.io/${BRANCH}/haos_ova-${BRANCH}.qcow2.xz"
|
||||
else
|
||||
URL="https://github.com/home-assistant/operating-system/releases/download/${BRANCH}/haos_ova-${BRANCH}.qcow2.xz"
|
||||
fi
|
||||
|
||||
CACHE_DIR="/var/lib/vz/template/cache"
|
||||
CACHE_FILE="$CACHE_DIR/$(basename "$URL")"
|
||||
FILE_IMG="/var/lib/vz/template/tmp/${CACHE_FILE##*/%.xz}" # .qcow2
|
||||
CACHE_DIR="/var/lib/vz/template/cache"
|
||||
CACHE_FILE="$CACHE_DIR/$(basename "$URL")"
|
||||
FILE_IMG="/var/lib/vz/template/tmp/${CACHE_FILE##*/%.xz}" # .qcow2
|
||||
|
||||
mkdir -p "$CACHE_DIR" "$(dirname "$FILE_IMG")"
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
mkdir -p "$CACHE_DIR" "$(dirname "$FILE_IMG")"
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
|
||||
download_and_validate_xz "$URL" "$CACHE_FILE"
|
||||
download_and_validate_xz "$URL" "$CACHE_FILE"
|
||||
|
||||
msg_info "Creating Home Assistant OS VM shell"
|
||||
qm create $VMID -machine q35 -bios ovmf -agent 1 -tablet 0 -localtime 1 ${CPU_TYPE} \
|
||||
-cores "$CORE_COUNT" -memory "$RAM_SIZE" -name "$HN" -tags community-script \
|
||||
-net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 -scsihw virtio-scsi-pci >/dev/null
|
||||
msg_ok "Created VM shell"
|
||||
msg_info "Creating Home Assistant OS VM shell"
|
||||
qm create $VMID -machine q35 -bios ovmf -agent 1 -tablet 0 -localtime 1 ${CPU_TYPE} \
|
||||
-cores "$CORE_COUNT" -memory "$RAM_SIZE" -name "$HN" -tags community-script \
|
||||
-net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 -scsihw virtio-scsi-pci >/dev/null
|
||||
msg_ok "Created VM shell"
|
||||
|
||||
extract_xz_with_pv "$CACHE_FILE" "$FILE_IMG"
|
||||
extract_xz_with_pv "$CACHE_FILE" "$FILE_IMG"
|
||||
|
||||
msg_info "Importing disk into storage ($STORAGE)"
|
||||
if qm disk import --help >/dev/null 2>&1; then
|
||||
IMPORT_CMD=(qm disk import)
|
||||
else
|
||||
IMPORT_CMD=(qm importdisk)
|
||||
fi
|
||||
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$FILE_IMG" "$STORAGE" --format raw 2>&1 || true)"
|
||||
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
|
||||
[[ -z "$DISK_REF" ]] && DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
|
||||
[[ -z "$DISK_REF" ]] && {
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
}
|
||||
msg_ok "Imported disk (${CL}${BL}${DISK_REF}${CL})"
|
||||
msg_info "Importing disk into storage ($STORAGE)"
|
||||
if qm disk import --help >/dev/null 2>&1; then
|
||||
IMPORT_CMD=(qm disk import)
|
||||
else
|
||||
IMPORT_CMD=(qm importdisk)
|
||||
fi
|
||||
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$FILE_IMG" "$STORAGE" --format raw 2>&1 || true)"
|
||||
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
|
||||
[[ -z "$DISK_REF" ]] && DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
|
||||
[[ -z "$DISK_REF" ]] && {
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
}
|
||||
msg_ok "Imported disk (${CL}${BL}${DISK_REF}${CL})"
|
||||
|
||||
rm -f "$FILE_IMG"
|
||||
rm -f "$FILE_IMG"
|
||||
|
||||
msg_info "Attaching EFI and root disk"
|
||||
qm set $VMID \
|
||||
--efidisk0 ${STORAGE}:0,efitype=4m \
|
||||
--scsi0 ${DISK_REF},ssd=1,discard=on \
|
||||
--boot order=scsi0 \
|
||||
--serial0 socket >/dev/null
|
||||
qm set $VMID --agent enabled=1 >/dev/null
|
||||
msg_ok "Attached EFI and root disk"
|
||||
msg_info "Attaching EFI and root disk"
|
||||
qm set $VMID \
|
||||
--efidisk0 ${STORAGE}:0,efitype=4m \
|
||||
--scsi0 ${DISK_REF},ssd=1,discard=on \
|
||||
--boot order=scsi0 \
|
||||
--serial0 socket >/dev/null
|
||||
qm set $VMID --agent enabled=1 >/dev/null
|
||||
msg_ok "Attached EFI and root disk"
|
||||
|
||||
msg_info "Resizing disk to $DISK_SIZE"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
msg_ok "Resized disk"
|
||||
msg_info "Resizing disk to $DISK_SIZE"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
msg_ok "Resized disk"
|
||||
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -692,27 +631,22 @@ create_vm() {
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
msg_ok "Created Homeassistant OS VM ${CL}${BL}(${HN})"
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
msg_ok "Created Homeassistant OS VM ${CL}${BL}(${HN})"
|
||||
|
||||
if whiptail --backtitle "Proxmox VE Helper Scripts" --title "Image Cache" \
|
||||
--yesno "Keep downloaded Home Assistant OS image for future VMs?\n\nFile: $CACHE_FILE" 10 70; then
|
||||
msg_ok "Keeping cached image"
|
||||
else
|
||||
rm -f "$CACHE_FILE"
|
||||
msg_ok "Deleted cached image"
|
||||
fi
|
||||
if whiptail --backtitle "Proxmox VE Helper Scripts" --title "Image Cache" \
|
||||
--yesno "Keep downloaded Home Assistant OS image for future VMs?\n\nFile: $CACHE_FILE" 10 70; then
|
||||
msg_ok "Keeping cached image"
|
||||
else
|
||||
rm -f "$CACHE_FILE"
|
||||
msg_ok "Deleted cached image"
|
||||
fi
|
||||
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Home Assistant OS VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Home Assistant OS VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
} # end create_vm
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Home Assistant OS VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Home Assistant OS VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
|
||||
@@ -101,15 +101,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
|
||||
@@ -100,15 +100,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
|
||||
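The recovery menus in the error handlers above rely on whiptail writing the selected tag to stderr; the 3>&1 1>&2 2>&3 redirection swaps stdout and stderr around the call so the dialog still draws on the terminal while the chosen tag is captured by the command substitution. A minimal sketch of that idiom, assuming whiptail is available; the menu entries are illustrative:

# Capture a whiptail radiolist selection; fall back to ABORT if the dialog is cancelled.
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
  --radiolist "Choose a recovery action:" 12 60 2 \
  "RETRY" "Retry VM creation" "ON" \
  "ABORT" "Destroy VM and exit" "OFF" \
  3>&1 1>&2 2>&3) || choice="ABORT"

case "$choice" in
  RETRY) echo "retrying" ;;
  *) echo "aborting" ;;
esac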
vm/openwrt-vm.sh (257 changes)
@@ -70,61 +70,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
set +o pipefail
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -Eeo pipefail
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -153,15 +105,7 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
@@ -568,59 +512,57 @@ else
|
||||
fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
msg_info "Getting URL for OpenWrt Disk Image"
|
||||
|
||||
create_vm() {
|
||||
msg_info "Getting URL for OpenWrt Disk Image"
|
||||
response=$(curl -fsSL https://openwrt.org)
|
||||
stableversion=$(echo "$response" | sed -n 's/.*Current stable release - OpenWrt \([0-9.]\+\).*/\1/p' | head -n 1)
|
||||
URL="https://downloads.openwrt.org/releases/$stableversion/targets/x86/64/openwrt-$stableversion-x86-64-generic-ext4-combined.img.gz"
|
||||
|
||||
response=$(curl -fsSL https://openwrt.org)
|
||||
stableversion=$(echo "$response" | sed -n 's/.*Current stable release - OpenWrt \([0-9.]\+\).*/\1/p' | head -n 1)
|
||||
URL="https://downloads.openwrt.org/releases/$stableversion/targets/x86/64/openwrt-$stableversion-x86-64-generic-ext4-combined.img.gz"
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
FILE=$(basename "$URL")
|
||||
msg_ok "Downloaded ${CL}${BL}$FILE${CL}"
|
||||
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
FILE=$(basename "$URL")
|
||||
msg_ok "Downloaded ${CL}${BL}$FILE${CL}"
|
||||
gunzip -f "$FILE" >/dev/null 2>&1 || true
|
||||
FILE="${FILE%.*}"
|
||||
msg_ok "Extracted OpenWrt Disk Image ${CL}${BL}$FILE${CL}"
|
||||
|
||||
gunzip -f "$FILE" >/dev/null 2>&1 || true
|
||||
FILE="${FILE%.*}"
|
||||
msg_ok "Extracted OpenWrt Disk Image ${CL}${BL}$FILE${CL}"
|
||||
msg_info "Creating OpenWrt VM"
|
||||
qm create $VMID -cores $CORE_COUNT -memory $RAM_SIZE -name $HN \
|
||||
-onboot 1 -ostype l26 -scsihw virtio-scsi-pci --tablet 0
|
||||
if [[ "$(pvesm status | awk -v s=$STORAGE '$1==s {print $2}')" == "dir" ]]; then
|
||||
qm set $VMID -efidisk0 ${STORAGE}:0,efitype=4m,size=4M
|
||||
else
|
||||
pvesm alloc $STORAGE $VMID vm-$VMID-disk-0 4M >/dev/null
|
||||
qm set $VMID -efidisk0 ${STORAGE}:vm-$VMID-disk-0,efitype=4m,size=4M
|
||||
fi
|
||||
|
||||
msg_info "Creating OpenWrt VM"
|
||||
qm create $VMID -cores $CORE_COUNT -memory $RAM_SIZE -name $HN \
|
||||
-onboot 1 -ostype l26 -scsihw virtio-scsi-pci --tablet 0
|
||||
if [[ "$(pvesm status | awk -v s=$STORAGE '$1==s {print $2}')" == "dir" ]]; then
|
||||
qm set $VMID -efidisk0 ${STORAGE}:0,efitype=4m,size=4M
|
||||
else
|
||||
pvesm alloc $STORAGE $VMID vm-$VMID-disk-0 4M >/dev/null
|
||||
qm set $VMID -efidisk0 ${STORAGE}:vm-$VMID-disk-0,efitype=4m,size=4M
|
||||
fi
|
||||
IMPORT_OUT="$(qm importdisk $VMID $FILE $STORAGE --format raw 2>&1 || true)"
|
||||
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p")"
|
||||
|
||||
IMPORT_OUT="$(qm importdisk $VMID $FILE $STORAGE --format raw 2>&1 || true)"
|
||||
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p")"
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$1 ~ ("vm-"id"-disk-") {print $1}' | sort | tail -n1)"
|
||||
fi
|
||||
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$1 ~ ("vm-"id"-disk-") {print $1}' | sort | tail -n1)"
|
||||
fi
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
fi
|
||||
qm set $VMID \
|
||||
-efidisk0 ${STORAGE}:0,efitype=4m,size=4M \
|
||||
-scsi0 ${DISK_REF} \
|
||||
-boot order=scsi0 \
|
||||
-tags community-script >/dev/null
|
||||
msg_ok "Attached disk"
|
||||
|
||||
qm set $VMID \
|
||||
-efidisk0 ${STORAGE}:0,efitype=4m,size=4M \
|
||||
-scsi0 ${DISK_REF} \
|
||||
-boot order=scsi0 \
|
||||
-tags community-script >/dev/null
|
||||
msg_ok "Attached disk"
|
||||
msg_info "Resizing disk to ${DISK_SIZE}"
|
||||
qm disk resize "$VMID" scsi0 "${DISK_SIZE}" >/dev/null
|
||||
msg_ok "Resized disk to ${DISK_SIZE}"
|
||||
|
||||
msg_info "Resizing disk to ${DISK_SIZE}"
|
||||
qm disk resize "$VMID" scsi0 "${DISK_SIZE}" >/dev/null
|
||||
msg_ok "Resized disk to ${DISK_SIZE}"
|
||||
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -648,71 +590,66 @@ create_vm() {
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
|
||||
msg_ok "Created OpenWrt VM ${CL}${BL}(${HN})"
|
||||
msg_info "OpenWrt is being started in order to configure the network interfaces."
|
||||
qm start $VMID
|
||||
sleep 15
|
||||
msg_info "Waiting for OpenWrt to boot..."
|
||||
for i in {1..30}; do
|
||||
if qm status "$VMID" | grep -q "running"; then
|
||||
sleep 5
|
||||
msg_ok "OpenWrt is running"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
msg_ok "Network interfaces are being configured as OpenWrt initiates."
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
|
||||
msg_ok "Created OpenWrt VM ${CL}${BL}(${HN})"
|
||||
msg_info "OpenWrt is being started in order to configure the network interfaces."
|
||||
qm start $VMID
|
||||
sleep 15
|
||||
msg_info "Waiting for OpenWrt to boot..."
|
||||
for i in {1..30}; do
|
||||
if qm status "$VMID" | grep -q "running"; then
|
||||
send_line_to_vm ""
|
||||
send_line_to_vm "uci delete network.@device[0]"
|
||||
send_line_to_vm "uci set network.wan=interface"
|
||||
send_line_to_vm "uci set network.wan.device=eth1"
|
||||
send_line_to_vm "uci set network.wan.proto=dhcp"
|
||||
send_line_to_vm "uci delete network.lan"
|
||||
send_line_to_vm "uci set network.lan=interface"
|
||||
send_line_to_vm "uci set network.lan.device=eth0"
|
||||
send_line_to_vm "uci set network.lan.proto=static"
|
||||
send_line_to_vm "uci set network.lan.ipaddr=${LAN_IP_ADDR}"
|
||||
send_line_to_vm "uci set network.lan.netmask=${LAN_NETMASK}"
|
||||
send_line_to_vm "uci commit"
|
||||
send_line_to_vm "halt"
|
||||
msg_ok "Network interfaces configured in OpenWrt"
|
||||
else
|
||||
msg_error "VM is not running"
|
||||
exit 1
|
||||
sleep 5
|
||||
msg_ok "OpenWrt is running"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
msg_info "Waiting for OpenWrt to shut down..."
|
||||
until qm status "$VMID" | grep -q "stopped"; do
|
||||
sleep 2
|
||||
done
|
||||
msg_ok "OpenWrt has shut down"
|
||||
msg_ok "Network interfaces are being configured as OpenWrt initiates."
|
||||
|
||||
msg_info "Adding bridge interfaces on Proxmox side"
|
||||
qm set $VMID \
|
||||
-net0 virtio,bridge=${LAN_BRG},macaddr=${LAN_MAC}${LAN_VLAN}${MTU} \
|
||||
-net1 virtio,bridge=${BRG},macaddr=${MAC}${VLAN}${MTU} >/dev/null
|
||||
msg_ok "Bridge interfaces added"
|
||||
if qm status "$VMID" | grep -q "running"; then
|
||||
send_line_to_vm ""
|
||||
send_line_to_vm "uci delete network.@device[0]"
|
||||
send_line_to_vm "uci set network.wan=interface"
|
||||
send_line_to_vm "uci set network.wan.device=eth1"
|
||||
send_line_to_vm "uci set network.wan.proto=dhcp"
|
||||
send_line_to_vm "uci delete network.lan"
|
||||
send_line_to_vm "uci set network.lan=interface"
|
||||
send_line_to_vm "uci set network.lan.device=eth0"
|
||||
send_line_to_vm "uci set network.lan.proto=static"
|
||||
send_line_to_vm "uci set network.lan.ipaddr=${LAN_IP_ADDR}"
|
||||
send_line_to_vm "uci set network.lan.netmask=${LAN_NETMASK}"
|
||||
send_line_to_vm "uci commit"
|
||||
send_line_to_vm "halt"
|
||||
msg_ok "Network interfaces configured in OpenWrt"
|
||||
else
|
||||
msg_error "VM is not running"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$START_VM" = "yes" ]; then
|
||||
msg_info "Starting OpenWrt VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started OpenWrt VM"
|
||||
fi
|
||||
msg_info "Waiting for OpenWrt to shut down..."
|
||||
until qm status "$VMID" | grep -q "stopped"; do
|
||||
sleep 2
|
||||
done
|
||||
msg_ok "OpenWrt has shut down"
|
||||
|
||||
VLAN_FINISH=""
|
||||
if [ -z "$VLAN" ] && [ "$VLAN2" != "999" ]; then
|
||||
VLAN_FINISH=" Please remember to adjust the VLAN tags to suit your network."
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed Successfully!${VLAN_FINISH:+\n$VLAN_FINISH}"
|
||||
} # end create_vm
|
||||
msg_info "Adding bridge interfaces on Proxmox side"
|
||||
qm set $VMID \
|
||||
-net0 virtio,bridge=${LAN_BRG},macaddr=${LAN_MAC}${LAN_VLAN}${MTU} \
|
||||
-net1 virtio,bridge=${BRG},macaddr=${MAC}${VLAN}${MTU} >/dev/null
|
||||
msg_ok "Bridge interfaces added"
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
if [ "$START_VM" = "yes" ]; then
|
||||
msg_info "Starting OpenWrt VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started OpenWrt VM"
|
||||
fi
|
||||
|
||||
VLAN_FINISH=""
|
||||
if [ -z "$VLAN" ] && [ "$VLAN2" != "999" ]; then
|
||||
VLAN_FINISH=" Please remember to adjust the VLAN tags to suit your network."
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed Successfully!${VLAN_FINISH:+\n$VLAN_FINISH}"
|
||||
|
||||
@@ -79,15 +79,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
|
||||
@@ -101,15 +101,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
|
||||
@@ -109,15 +109,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
|
||||
@@ -62,60 +62,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -144,15 +97,7 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
@@ -513,57 +458,55 @@ else
|
||||
fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
msg_info "Retrieving the URL for the Ubuntu 22.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
|
||||
create_vm() {
|
||||
msg_info "Retrieving the URL for the Ubuntu 22.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
msg_info "Creating a Ubuntu 22.04 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-ide2 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
msg_info "Creating a Ubuntu 22.04 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-ide2 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -591,28 +534,23 @@ create_vm() {
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
|
||||
msg_ok "Created a Ubuntu 22.04 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Ubuntu 22.04 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Ubuntu 22.04 VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "Setup Cloud-Init before starting \n
|
||||
msg_ok "Created a Ubuntu 22.04 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Ubuntu 22.04 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Ubuntu 22.04 VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "Setup Cloud-Init before starting \n
|
||||
More info at https://github.com/community-scripts/ProxmoxVE/discussions/272 \n"
|
||||
} # end create_vm
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
|
||||
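The Ubuntu cloud-image hunks all share the same storage-type switch: file-backed storages (nfs, dir, cifs) hold qcow2 volumes addressed as <vmid>/vm-<vmid>-disk-N.qcow2, btrfs holds raw files under the same per-VM prefix, and block-backed storages use plain vm-<vmid>-disk-N volume names imported as raw. A minimal sketch of that mapping, assuming pvesm's status output format; the function name disk_ref_for is illustrative:

# Map a storage's type to the disk naming and format conventions used above.
disk_ref_for() {
  local storage="$1" vmid="$2" index="$3"
  local stype ext="" prefix=""
  stype=$(pvesm status -storage "$storage" | awk 'NR>1 {print $2}')
  case "$stype" in
    nfs | dir | cifs) ext=".qcow2" prefix="$vmid/" ;;  # file-backed: qcow2 under a per-VM directory
    btrfs)            ext=".raw"   prefix="$vmid/" ;;  # btrfs: raw files under a per-VM directory
    *)                ext=""       prefix="" ;;        # block-backed (lvmthin, zfspool, ...): plain volume names
  esac
  printf '%s:%svm-%s-disk-%s%s\n' "$storage" "$prefix" "$vmid" "$index" "$ext"
}

# e.g. disk_ref_for local-lvm 105 0  ->  local-lvm:vm-105-disk-0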
@@ -65,60 +65,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -147,15 +100,7 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
@@ -515,57 +460,55 @@ else
|
||||
fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
msg_info "Retrieving the URL for the Ubuntu 24.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
|
||||
create_vm() {
|
||||
msg_info "Retrieving the URL for the Ubuntu 24.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
msg_info "Creating a Ubuntu 24.04 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-ide2 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
msg_info "Creating a Ubuntu 24.04 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-ide2 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -593,28 +536,23 @@ create_vm() {
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
|
||||
msg_ok "Created a Ubuntu 24.04 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Ubuntu 24.04 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Ubuntu 24.04 VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "Setup Cloud-Init before starting \n
|
||||
msg_ok "Created a Ubuntu 24.04 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Ubuntu 24.04 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Ubuntu 24.04 VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "Setup Cloud-Init before starting \n
|
||||
More info at https://github.com/community-scripts/ProxmoxVE/discussions/272 \n"
|
||||
} # end create_vm
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
|
||||
@@ -64,60 +64,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -146,15 +99,7 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
@@ -514,57 +459,55 @@ else
|
||||
fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
msg_info "Retrieving the URL for the Ubuntu 25.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/plucky/current/plucky-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
|
||||
create_vm() {
|
||||
msg_info "Retrieving the URL for the Ubuntu 25.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/plucky/current/plucky-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
msg_info "Creating a Ubuntu 25.04 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-ide2 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
msg_info "Creating a Ubuntu 25.04 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-ide2 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -592,28 +535,23 @@ create_vm() {
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
if [ -n "$DISK_SIZE" ]; then
|
||||
msg_info "Resizing disk to $DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
|
||||
else
|
||||
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
|
||||
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
|
||||
fi
|
||||
|
||||
msg_ok "Created a Ubuntu 25.04 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Ubuntu 25.04 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Ubuntu 25.04 VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "Setup Cloud-Init before starting \n
|
||||
msg_ok "Created a Ubuntu 25.04 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Ubuntu 25.04 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Ubuntu 25.04 VM"
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "Setup Cloud-Init before starting \n
|
||||
More info at https://github.com/community-scripts/ProxmoxVE/discussions/272 \n"
|
||||
} # end create_vm
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
|
||||
@@ -99,15 +99,8 @@ function cleanup_vmid() {
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
local exit_code=$?
|
||||
popd >/dev/null
|
||||
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
post_update_to_api "done" "none"
|
||||
else
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
fi
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
rm -rf $TEMP_DIR
|
||||
}
|
||||
|
||||
|
||||
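The cleanup() hunks repeated through this compare all make the same change: instead of unconditionally reporting "done" on exit, a final status is only sent when an installing status was posted earlier (POST_TO_API_DONE) and no final status has gone out yet (POST_UPDATE_DONE), with the exit code deciding between done and failed. A minimal sketch of that guard, reusing the repo's post_update_to_api helper and flag names; the wrapper name report_final_status is illustrative:

# Send exactly one final status, and only if the run was ever reported as started.
report_final_status() {
  local exit_code="$1"
  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
    if [[ $exit_code -eq 0 ]]; then
      post_update_to_api "done" "none"
    else
      post_update_to_api "failed" "$exit_code"
    fi
  fi
}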