Mirror of https://github.com/community-scripts/ProxmoxVE.git (synced 2026-02-17 10:43:26 +01:00)

Compare commits: `feature/vm` ... `main` — 14 commits

| SHA1 |
|---|
| cbee9d64b5 |
| ffcda217e3 |
| 438d5d6b94 |
| 104366bc64 |
| 9dab79f8ca |
| 2dddeaf966 |
| fae06a3a58 |
| 137272c354 |
| 52a9e23401 |
| c2333de180 |
| ad8974894b |
| 38af4be5ba |
| 80ae1f34fa |
| 06bc6e20d5 |
20 CHANGELOG.md

@@ -404,6 +404,24 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit

</details>

## 2026-02-17

### 🆕 New Scripts

- Databasus ([#12018](https://github.com/community-scripts/ProxmoxVE/pull/12018))

### 🚀 Updated Scripts

- #### 🐞 Bug Fixes

  - fix: pterodactyl-panel add symlink [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11997](https://github.com/community-scripts/ProxmoxVE/pull/11997))

### 💾 Core

- #### 🐞 Bug Fixes

  - core: call get_lxc_ip in start() before updates [@MickLesk](https://github.com/MickLesk) ([#12015](https://github.com/community-scripts/ProxmoxVE/pull/12015))

## 2026-02-16

### 🆕 New Scripts

@@ -413,6 +431,8 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit

### 🚀 Updated Scripts

- Opencloud: Pin version to 5.1.0 [@vhsdream](https://github.com/vhsdream) ([#12004](https://github.com/community-scripts/ProxmoxVE/pull/12004))

- #### 🐞 Bug Fixes

  - Tududi: Fix sed command for DB_FILE configuration [@tremor021](https://github.com/tremor021) ([#11988](https://github.com/community-scripts/ProxmoxVE/pull/11988))
78 ct/databasus.sh (new file)

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/databasus/databasus

APP="Databasus"
var_tags="${var_tags:-backup;postgresql;database}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

header_info "$APP"
variables
color
catch_errors

function update_script() {
  header_info
  check_container_storage
  check_container_resources

  if [[ ! -f /opt/databasus/databasus ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi

  if check_for_gh_release "databasus" "databasus/databasus"; then
    msg_info "Stopping Databasus"
    $STD systemctl stop databasus
    msg_ok "Stopped Databasus"

    msg_info "Backing up Configuration"
    cp /opt/databasus/.env /opt/databasus.env.bak
    msg_ok "Backed up Configuration"

    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus"

    msg_info "Updating Databasus"
    cd /opt/databasus/frontend
    $STD npm ci
    $STD npm run build
    cd /opt/databasus/backend
    $STD go mod download
    $STD /root/go/bin/swag init -g cmd/main.go -o swagger
    $STD env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o databasus ./cmd/main.go
    mv /opt/databasus/backend/databasus /opt/databasus/databasus
    cp -r /opt/databasus/frontend/dist/* /opt/databasus/ui/build/
    cp -r /opt/databasus/backend/migrations /opt/databasus/
    chown -R postgres:postgres /opt/databasus
    msg_ok "Updated Databasus"

    msg_info "Restoring Configuration"
    cp /opt/databasus.env.bak /opt/databasus/.env
    rm -f /opt/databasus.env.bak
    chown postgres:postgres /opt/databasus/.env
    msg_ok "Restored Configuration"

    msg_info "Starting Databasus"
    $STD systemctl start databasus
    msg_ok "Started Databasus"
    msg_ok "Updated successfully!"
  fi
  exit
}

start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
@@ -29,7 +29,7 @@ function update_script() {
  exit
fi

RELEASE="v5.0.2"
RELEASE="v5.1.0"
if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}"; then
  msg_info "Stopping services"
  systemctl stop opencloud opencloud-wopi

@@ -71,6 +71,7 @@ EOF
$STD php artisan migrate --seed --force --no-interaction
chown -R www-data:www-data /opt/pterodactyl-panel/*
chmod -R 755 /opt/pterodactyl-panel/storage /opt/pterodactyl-panel/bootstrap/cache/
ln -s /opt/pterodactyl-panel /var/www/pterodactyl
rm -rf "/opt/pterodactyl-panel/panel.tar.gz"
echo "${RELEASE}" >/opt/${APP}_version.txt
msg_ok "Updated $APP to v${RELEASE}"
@@ -1,301 +0,0 @@
# VM Smart Recovery — Work Instructions

**Branch:** `feature/vm-smart-recovery` (based on `main`)
**Related:** `feature/smart-error-recovery` (LXC, PR #11221)
**Created:** 2026-02-16

---

## 1. Starting Point

### Architecture Comparison: LXC vs. VM

| Aspect | LXC (done in PR #11221) | VM (open) |
|---|---|---|
| Shared code | `misc/build.func` (5577 lines) | `misc/vm-core.func` (627 lines) — **used only by `docker-vm.sh`** |
| Number of scripts | ~170 | 15 |
| Architecture | All use `build_container()` | **2 generations** (see below) |
| Software install | `pct exec` → install script inside the container | Varies: `virt-customize`, cloud-init, `qm sendkey`, or nothing at all |
| Telemetry | `post_to_api()` + `post_update_to_api()` | Identical — all source `misc/api.func` |
| Error handling | Centralized in `build.func` traps | Each script has its own `error_handler()` |
| Recovery | Smart menu with 6 dynamic options | **None** — on failure the VM is destroyed immediately (`cleanup_vmid`) |

### Two Generations of VM Scripts

**Generation 1 — legacy (monolithic):** `haos-vm.sh`, `debian-vm.sh`, `openwrt-vm.sh`, and 11 more.
- Self-contained 500–700-line scripts
- Define **all** utility functions inline (colors, icons, `msg_info`/`msg_ok`, `error_handler`, `cleanup`, etc.)
- Source only `misc/api.func` for telemetry

**Generation 2 — modern (modular):** `docker-vm.sh` only.
- Sources three shared libraries:
  - `misc/api.func` — telemetry
  - `misc/vm-core.func` — shared utilities (627 lines)
  - `misc/cloud-init.func` — cloud-init configuration (709 lines)
- Calls `load_functions` from `vm-core.func`

### Telemetry Data (Top VM Failures)

| Script | Share of VM failures |
|---|---|
| `docker-vm.sh` | 30.1 % |
| `openwrt-vm.sh` | 25.9 % |
| `debian-13-vm.sh` | 9.6 % |

---

## 2. Scope & Boundaries

### In Scope

- Smart recovery for VM creation failures (retry menu analogous to LXC)
- Error detection: download, disk import, virt-customize, resource conflicts, network
- Exit-code mapping (already present in `api.func`, shared between LXC and VM)

### Out of Scope (deliberately)

- **Migrating all legacy scripts to `vm-core.func`** → separate refactoring ticket
- **In-VM repair** → VMs have no `pct exec` equivalent
- **`qm sendkey` recovery** (OpenWrt) → not retryable by design
- **APT/DPKG repair inside the VM** → no shell access during install

---

## 3. Software Installation Method per Script

| Script | Method | Description |
|---|---|---|
| `docker-vm.sh` | `virt-customize` | Offline image manipulation (libguestfs) |
| `docker-vm.sh` (fallback) | systemd first-boot service | Script runs inside the VM on first boot (sketched after this table) |
| `haos-vm.sh` | None | Pre-built appliance (qcow2) |
| `debian-vm.sh` / `debian-13-vm.sh` | None / cloud-init | Base cloud image |
| `openwrt-vm.sh` | `qm sendkey` | Virtual keyboard automation |
| `opnsense-vm.sh` | `qm sendkey` + bootstrap | Virtual keyboard |
| `ubuntu-*-vm.sh` | Cloud-init | User configures before start |
| `owncloud-vm.sh` | `virt-customize` | Same as docker-vm.sh |
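For the `docker-vm.sh` fallback row above, the pattern is a one-shot unit baked into the image that runs an install script on the first boot. A minimal, hypothetical sketch — the unit name, script path, and flag file are assumptions, not the repository's actual implementation:

```bash
# Hypothetical first-boot install unit — names and paths are illustrative only.
cat <<'EOF' >/etc/systemd/system/firstboot-install.service
[Unit]
Description=One-time software installation on first boot
Wants=network-online.target
After=network-online.target
ConditionPathExists=!/var/lib/firstboot-install.done

[Service]
Type=oneshot
ExecStart=/usr/local/bin/firstboot-install.sh
ExecStartPost=/usr/bin/touch /var/lib/firstboot-install.done
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
EOF
systemctl enable firstboot-install.service
```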
---

## 4. Files & Changes

### 4.1 `misc/vm-core.func` — Central Recovery Logic

#### New function: `vm_error_handler_with_recovery()`

```
Flow:
├── Capture the exit code ($? FIRST — no ensure_log_on_host before it!)
├── Error classification:
│   ├── Download error (curl exit 6/7/22/28/35/52/56)
│   ├── Disk import error (qm importdisk, pvesm alloc)
│   ├── virt-customize error (libguestfs)
│   ├── Resource conflict (VMID exists, storage full)
│   └── Network error (DNS, timeout)
├── Smart recovery menu:
│   ├── [1] Retry (destroy VM & recreate)
│   ├── [2] Retry with different settings (change RAM/CPU/disk)
│   ├── [3] Keep VM (do not destroy, debug manually)
│   ├── [4] Abort (destroy VM, exit)
│   └── Dynamic options depending on the error type:
│       ├── Download error → "Clear cache & re-download"
│       └── Resource conflict → "Choose a different VMID"
└── On retry: cleanup_vmid() + call the create function again
```

#### New helper functions (error detection):

```bash
is_download_error()      # curl exit codes + HTTP 404/500
is_disk_import_error()   # qm importdisk stderr patterns
is_virt_customize_err()  # libguestfs error patterns
is_vmid_conflict()       # "already exists" in stderr
is_storage_full()        # "not enough space" patterns
```

#### Log Capture for VMs

Unlike LXC (where `/root/.install*.log` lives inside the container), VM errors must be captured directly from the stderr of the `qm`/`virt-customize` commands:

```bash
# Every critical command with stderr capture:
VM_ERROR_LOG="/tmp/vm-install-${VMID}.log"
qm importdisk "$VMID" "$IMAGE" "$STORAGE" 2>> "$VM_ERROR_LOG"
virt-customize -a "$IMAGE" --install docker.io 2>> "$VM_ERROR_LOG"
```

### 4.2 Retry Wrapper Architecture

Since VMs have no central `build_container()`, there are two approaches:

#### Option A: Wrapper in `vm-core.func` (recommended for Gen-2 scripts)

```bash
vm_create_with_recovery() {
  local create_fn="$1" # VM-specific creation function
  local max_retries=2
  local attempt=0

  while true; do
    if "$create_fn"; then
      return 0 # success
    else
      local rc=$? # capture the exit code before anything else overwrites $?
    fi
    ((attempt++))
    if ((attempt >= max_retries)); then
      : # max retries reached → only "keep" or "abort" remain in the menu
    fi
    vm_show_recovery_menu "$rc" "$attempt"
    # process the menu selection...
  done
}
```

#### Option B: Inline recovery in the legacy scripts

For the 14 legacy scripts (until they are migrated to `vm-core.func`):
- Minimal patch: extend `error_handler()` with a recovery prompt
- Do **not** call `cleanup_vmid` immediately, only after the user's decision

**Recommendation:** Start with **`docker-vm.sh` only** (30.1 % of failures) using Option A. Handle the legacy scripts as phase 2, after the migration.

### 4.3 `misc/api.func` — No Changes Needed

Exit-code mapping (`explain_exit_code()`) and `categorize_error()` are already universal (LXC + VM). Once PR #11221 is merged, 70+ exit codes are available. If this branch is finished earlier, the codes can be cherry-picked from `feature/smart-error-recovery`.

---

## 5. Key Differences: LXC vs. VM Recovery

| LXC | VM |
|---|---|
| APT/DPKG in-place repair inside the container | **Not possible** — no shell access during install |
| OOM retry with 2× resources | **Works** — `qm set` can change RAM/CPU afterwards (see the sketch after this table) |
| DNS override inside the container (`/etc/resolv.conf`) | **Not applicable** — the VM has its own network stack |
| Container is preserved during repair | VM must be **recreated from scratch** on retry |
| `build_container()` as the central retry loop | **New wrapper function needed** (`vm_create_with_recovery`) |
| `pct exec` for in-container access | No equivalent (qemu-guest-agent only if the VM is running and the agent is installed) |
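To illustrate the "OOM retry with 2× resources" row: once the VM exists, its allocation can be bumped before the next attempt. A minimal sketch, assuming the script tracks its sizing in `RAM_SIZE`/`CORE_COUNT` as the VM scripts in this repository do:

```bash
# Sketch only — double the allocation for the retry attempt.
RAM_SIZE=$((RAM_SIZE * 2))
CORE_COUNT=$((CORE_COUNT * 2))
qm set "$VMID" --memory "$RAM_SIZE" --cores "$CORE_COUNT"
```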
---

## 6. Technical Pitfalls

### 6.1 VMID Cleanup Before Retry

`cleanup_vmid` must clean up completely (see the sketch below):
- `qm stop "$VMID" --skiplock` (if running)
- `qm destroy "$VMID" --destroy-unreferenced-disks --purge`
- Some scripts create additional disks (`efidisk0`, `cloudinit`) that must be removed separately
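A minimal sketch of such a cleanup, assuming `VMID` is set; the real `cleanup_vmid()` implementations live in the individual VM scripts:

```bash
# Sketch only — stop the VM if needed, then destroy it including all of its disks.
cleanup_vmid() {
  if qm status "$VMID" &>/dev/null; then
    if qm status "$VMID" | grep -q "running"; then
      qm stop "$VMID" --skiplock &>/dev/null || true
    fi
    # --destroy-unreferenced-disks --purge also removes extra volumes such as efidisk0/cloudinit
    qm destroy "$VMID" --destroy-unreferenced-disks --purge &>/dev/null || true
  fi
}
```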
### 6.2 Image Caching

`docker-vm.sh` caches images in `/var/lib/vz/template/cache/`. On a download retry (see the sketch below):
- **Keep** the cached file if the download completed (md5/sha check)
- **Delete** it if corruption is suspected (curl error, xz validation failed)
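A sketch of that keep-or-delete decision, assuming a `SHA256SUMS` file is published next to the image (the URL, variable names, and the checksum file are assumptions):

```bash
# Illustrative only — verify the cached image before deciding to reuse it.
CACHE_DIR="/var/lib/vz/template/cache"
CACHE_FILE="${CACHE_DIR}/${FILE}"
if [[ -f "$CACHE_FILE" ]] &&
  curl -fsSL "${URL%/*}/SHA256SUMS" -o /tmp/SHA256SUMS &&
  (cd "$CACHE_DIR" && grep "$(basename "$CACHE_FILE")" /tmp/SHA256SUMS | sha256sum -c --status -); then
  msg_ok "Cached image verified — keeping it"
else
  msg_info "No valid checksum for the cached image — deleting and re-downloading"
  rm -f "$CACHE_FILE"
fi
```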
### 6.3 Cloud-Init State

If cloud-init was only partially configured, the entire state must be reset on retry:
```bash
qm set "$VMID" --delete cicustom
qm set "$VMID" --delete ciuser
qm set "$VMID" --delete cipassword
```

### 6.4 Legacy Scripts (14 of them)

- Define `error_handler()` inline and source only `api.func`
- To add recovery there, either:
  - **Patch each script individually** (high risk, lots of duplication)
  - **Migrate to `vm-core.func` first** (cleaner, but larger scope)
- **Recommendation:** prioritize the migration; adding recovery afterwards is trivial

### 6.5 `virt-customize` Fallback

`docker-vm.sh` already has a first-boot fallback for the Docker installation. If `virt-customize` fails:
- Recovery should treat this as a **"soft failure"**
- Actively suggest the fallback instead of retrying blindly

### 6.6 No `pct exec` Equivalent

- You **cannot repair "inside" the VM** the way you can with LXC
- `qm guest exec` does exist (with qemu-guest-agent), but it only works if:
  - The VM is running
  - The guest agent is installed
- That is exactly the point where the install typically fails

---

## 7. Implementation Order

| Phase | Task | Files | Impact |
|---|---|---|---|
| **Phase 1** | `vm_error_handler_with_recovery()` skeleton | `misc/vm-core.func` | Foundation for everything else |
| **Phase 2** | `docker-vm.sh`: integrate recovery | `vm/docker-vm.sh` | 30.1 % of failures |
| **Phase 3** | Error detection (download, import, virt-customize) | `misc/vm-core.func` | Intelligent dynamic menu options |
| **Phase 4** | `haos-vm.sh`: integrate recovery (download retry) | `vm/haos-vm.sh` | Download-corruption handling partially exists |
| **Phase 5** | `debian-13-vm.sh` + `ubuntu-*-vm.sh` | `vm/debian-13-vm.sh`, etc. | Cloud-image scripts |
| **Phase 6** | `openwrt-vm.sh` (limited — download/import retry only) | `vm/openwrt-vm.sh` | `sendkey` part not retryable |

---

## 8. Test Matrix

| Scenario | Expected behavior |
|---|---|
| Download error (curl 6/7/28) | Menu: "Retry download" + "Clear cache" |
| Disk import error | Menu: "Retry" + "Choose different storage" |
| VMID conflict | Menu: "Different VMID" + "Destroy existing VM" |
| virt-customize error (docker-vm) | Menu: "Retry" + "Use first-boot fallback" |
| Storage full | Menu: "Choose different storage" + "Shrink disk" |
| Network timeout | Menu: "Retry" + "Abort" |
| 2× retries reached | Only "Keep VM" or "Abort" remain |
| User chooses "Keep VM" | Do not destroy the VM; explain manual access |

---

## 9. Branch Workflow

```bash
# Create the new branch (already done):
git checkout main
git pull origin main
git checkout -b feature/vm-smart-recovery

# Commit the work in phases:
# Phase 1: git commit -m "feat(vm): add vm_error_handler_with_recovery to vm-core.func"
# Phase 2: git commit -m "feat(vm): integrate smart recovery into docker-vm.sh"
# etc.

# Open the PR against main (NOT against feature/smart-error-recovery)
```

### Dependency on PR #11221

The `api.func` changes from `feature/smart-error-recovery` (70+ exit codes, `categorize_error()`) will land in `main` automatically once PR #11221 is merged.

- If the VM branch is started **after** the PR #11221 merge → everything is already there
- If the VM branch is finished **before** that → cherry-pick the `api.func` codes from `feature/smart-error-recovery`

---

## 10. Reference: Exit-0 Bug (LXC only, fixed)

> This bug affects **only LXC** (`misc/build.func`), not the VM scripts.

**Root cause:** The ERR trap in `build.func` called `ensure_log_on_host` before `post_update_to_api`. Because `ensure_log_on_host` returns with exit 0, `$?` was reset to 0 → telemetry reported "failed/0" instead of the real exit code (~15–20 records/day).

**Fix (PR #11221, commit `2d7e707a0`):**
```bash
# Before (bug):
trap 'ensure_log_on_host; post_update_to_api "failed" "$?"' ERR

# After (fix):
trap '_ERR_CODE=$?; ensure_log_on_host; post_update_to_api "failed" "$_ERR_CODE"' ERR
```

**VM scripts are not affected:** they capture `$?` correctly as the first line of `error_handler()`:
```bash
function error_handler() {
  local exit_code="$?" # first line → correct
  ...
}
```
44 frontend/public/json/databasus.json (new file)

@@ -0,0 +1,44 @@
{
  "name": "Databasus",
  "slug": "databasus",
  "categories": [
    7
  ],
  "date_created": "2026-02-17",
  "type": "ct",
  "updateable": true,
  "privileged": false,
  "interface_port": 80,
  "documentation": "https://github.com/databasus/databasus",
  "website": "https://github.com/databasus/databasus",
  "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/databasus.webp",
  "config_path": "/opt/databasus/.env",
  "description": "Free, open source and self-hosted solution for automated PostgreSQL backups. With multiple storage options, notifications, scheduling, and a beautiful web interface for managing database backups across multiple PostgreSQL instances.",
  "install_methods": [
    {
      "type": "default",
      "script": "ct/databasus.sh",
      "resources": {
        "cpu": 2,
        "ram": 2048,
        "hdd": 8,
        "os": "Debian",
        "version": "13"
      }
    }
  ],
  "default_credentials": {
    "username": "admin@localhost",
    "password": "See /root/databasus.creds"
  },
  "notes": [
    {
      "text": "Supports PostgreSQL versions 12-18 with cloud and self-hosted instances",
      "type": "info"
    },
    {
      "text": "Features: Scheduled backups, multiple storage providers, notifications, encryption",
      "type": "info"
    }
  ]
}
@@ -1,5 +1,5 @@
{
"generated": "2026-02-16T12:14:16Z",
"generated": "2026-02-17T06:22:06Z",
"versions": [
{
"slug": "2fauth",
@@ -193,9 +193,9 @@
{
"slug": "cleanuparr",
"repo": "Cleanuparr/Cleanuparr",
"version": "v2.6.2",
"version": "v2.6.3",
"pinned": false,
"date": "2026-02-15T02:15:19Z"
"date": "2026-02-16T22:41:25Z"
},
{
"slug": "cloudreve",
@@ -557,9 +557,9 @@
{
"slug": "immich-public-proxy",
"repo": "alangrainger/immich-public-proxy",
"version": "v1.15.2",
"version": "v1.15.3",
"pinned": false,
"date": "2026-02-16T08:54:59Z"
"date": "2026-02-16T22:54:27Z"
},
{
"slug": "inspircd",
@@ -578,16 +578,16 @@
{
"slug": "invoiceninja",
"repo": "invoiceninja/invoiceninja",
"version": "v5.12.60",
"version": "v5.12.62",
"pinned": false,
"date": "2026-02-15T00:11:31Z"
"date": "2026-02-17T03:23:48Z"
},
{
"slug": "jackett",
"repo": "Jackett/Jackett",
"version": "v0.24.1127",
"version": "v0.24.1140",
"pinned": false,
"date": "2026-02-16T08:43:41Z"
"date": "2026-02-17T05:54:25Z"
},
{
"slug": "jellystat",
@@ -704,9 +704,9 @@
{
"slug": "librenms",
"repo": "librenms/librenms",
"version": "26.1.1",
"version": "26.2.0",
"pinned": false,
"date": "2026-01-12T23:26:02Z"
"date": "2026-02-16T12:15:13Z"
},
{
"slug": "librespeed-rust",
@@ -739,9 +739,9 @@
{
"slug": "linkstack",
"repo": "linkstackorg/linkstack",
"version": "v4.8.5",
"version": "v4.8.4",
"pinned": false,
"date": "2026-01-26T18:46:52Z"
"date": "2024-12-10T15:14:34Z"
},
{
"slug": "linkwarden",
@@ -802,9 +802,9 @@
{
"slug": "mealie",
"repo": "mealie-recipes/mealie",
"version": "v3.10.2",
"version": "v3.11.0",
"pinned": false,
"date": "2026-02-04T23:32:32Z"
"date": "2026-02-17T04:13:35Z"
},
{
"slug": "mediamanager",
@@ -956,9 +956,9 @@
{
"slug": "opencloud",
"repo": "opencloud-eu/opencloud",
"version": "v5.0.2",
"version": "v5.1.0",
"pinned": true,
"date": "2026-02-05T16:29:01Z"
"date": "2026-02-16T15:04:28Z"
},
{
"slug": "opengist",
@@ -1026,16 +1026,16 @@
{
"slug": "paperless-ngx",
"repo": "paperless-ngx/paperless-ngx",
"version": "v2.20.6",
"version": "v2.20.7",
"pinned": false,
"date": "2026-01-31T07:30:27Z"
"date": "2026-02-16T16:52:23Z"
},
{
"slug": "patchmon",
"repo": "PatchMon/PatchMon",
"version": "v1.4.0",
"version": "v1.4.1",
"pinned": false,
"date": "2026-02-13T10:39:03Z"
"date": "2026-02-16T18:00:13Z"
},
{
"slug": "paymenter",
@@ -1362,9 +1362,9 @@
{
"slug": "slskd",
"repo": "slskd/slskd",
"version": "0.24.3",
"version": "0.24.4",
"pinned": false,
"date": "2026-01-15T14:40:15Z"
"date": "2026-02-16T16:50:17Z"
},
{
"slug": "snipeit",
@@ -1411,9 +1411,9 @@
{
"slug": "stirling-pdf",
"repo": "Stirling-Tools/Stirling-PDF",
"version": "v2.4.6",
"version": "v2.5.0",
"pinned": false,
"date": "2026-02-12T00:01:19Z"
"date": "2026-02-16T22:58:17Z"
},
{
"slug": "streamlink-webui",
@@ -1544,9 +1544,9 @@
{
"slug": "tunarr",
"repo": "chrisbenincasa/tunarr",
"version": "v1.1.12",
"version": "v1.1.13",
"pinned": false,
"date": "2026-02-03T20:19:00Z"
"date": "2026-02-16T16:16:17Z"
},
{
"slug": "uhf",
@@ -1628,9 +1628,9 @@
{
"slug": "wanderer",
"repo": "meilisearch/meilisearch",
"version": "v1.35.0",
"version": "v1.35.1",
"pinned": false,
"date": "2026-02-02T09:57:03Z"
"date": "2026-02-16T17:01:17Z"
},
{
"slug": "warracker",
171 install/databasus-install.sh (new file)

@@ -0,0 +1,171 @@
#!/usr/bin/env bash

# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/databasus/databasus

source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os

msg_info "Installing Dependencies"
$STD apt install -y \
  nginx \
  valkey
msg_ok "Installed Dependencies"

PG_VERSION="17" setup_postgresql
setup_go
NODE_VERSION="24" setup_nodejs

fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus"

msg_info "Building Databasus (Patience)"
cd /opt/databasus/frontend
$STD npm ci
$STD npm run build
cd /opt/databasus/backend
$STD go mod tidy
$STD go mod download
$STD go install github.com/swaggo/swag/cmd/swag@latest
$STD /root/go/bin/swag init -g cmd/main.go -o swagger
$STD env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o databasus ./cmd/main.go
mv /opt/databasus/backend/databasus /opt/databasus/databasus
mkdir -p /databasus-data/{pgdata,temp,backups,data,logs}
mkdir -p /opt/databasus/ui/build
mkdir -p /opt/databasus/migrations
cp -r /opt/databasus/frontend/dist/* /opt/databasus/ui/build/
cp -r /opt/databasus/backend/migrations/* /opt/databasus/migrations/
chown -R postgres:postgres /databasus-data
msg_ok "Built Databasus"

msg_info "Configuring Databasus"
JWT_SECRET=$(openssl rand -hex 32)
ENCRYPTION_KEY=$(openssl rand -hex 32)
# Create PostgreSQL version symlinks for compatibility
for v in 12 13 14 15 16 18; do
  ln -sf /usr/lib/postgresql/17 /usr/lib/postgresql/$v
done
# Install goose for migrations
$STD go install github.com/pressly/goose/v3/cmd/goose@latest
ln -sf /root/go/bin/goose /usr/local/bin/goose
cat <<EOF >/opt/databasus/.env
# Environment
ENV_MODE=production

# Server
SERVER_PORT=4005
SERVER_HOST=0.0.0.0

# Database
DATABASE_DSN=host=localhost user=postgres password=postgres dbname=databasus port=5432 sslmode=disable
DATABASE_URL=postgres://postgres:postgres@localhost:5432/databasus?sslmode=disable

# Migrations
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgres://postgres:postgres@localhost:5432/databasus?sslmode=disable
GOOSE_MIGRATION_DIR=/opt/databasus/migrations

# Valkey (Redis-compatible cache)
VALKEY_HOST=localhost
VALKEY_PORT=6379

# Security
JWT_SECRET=${JWT_SECRET}
ENCRYPTION_KEY=${ENCRYPTION_KEY}

# Paths
DATA_DIR=/databasus-data/data
BACKUP_DIR=/databasus-data/backups
LOG_DIR=/databasus-data/logs
EOF
chown postgres:postgres /opt/databasus/.env
chmod 600 /opt/databasus/.env
msg_ok "Configured Databasus"

msg_info "Configuring Valkey"
cat <<EOF >/etc/valkey/valkey.conf
port 6379
bind 127.0.0.1
protected-mode yes
save ""
maxmemory 256mb
maxmemory-policy allkeys-lru
EOF
systemctl enable -q --now valkey-server
systemctl restart valkey-server
msg_ok "Configured Valkey"

msg_info "Creating Database"
# Configure PostgreSQL to allow local password auth for databasus
PG_HBA="/etc/postgresql/17/main/pg_hba.conf"
if ! grep -q "databasus" "$PG_HBA"; then
  sed -i '/^local\s*all\s*all/i local databasus postgres trust' "$PG_HBA"
  sed -i '/^host\s*all\s*all\s*127/i host databasus postgres 127.0.0.1/32 trust' "$PG_HBA"
  systemctl reload postgresql
fi
$STD sudo -u postgres psql -c "CREATE DATABASE databasus;" 2>/dev/null || true
$STD sudo -u postgres psql -c "ALTER USER postgres WITH SUPERUSER CREATEROLE CREATEDB;" 2>/dev/null || true
msg_ok "Created Database"

msg_info "Creating Databasus Service"
cat <<EOF >/etc/systemd/system/databasus.service
[Unit]
Description=Databasus - Database Backup Management
After=network.target postgresql.service valkey.service
Requires=postgresql.service valkey.service

[Service]
Type=simple
WorkingDirectory=/opt/databasus
EnvironmentFile=/opt/databasus/.env
ExecStart=/opt/databasus/databasus
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=multi-user.target
EOF
$STD systemctl daemon-reload
$STD systemctl enable -q --now databasus
msg_ok "Created Databasus Service"

msg_info "Configuring Nginx"
cat <<EOF >/etc/nginx/sites-available/databasus
server {
    listen 80;
    server_name _;

    location / {
        proxy_pass http://127.0.0.1:4005;
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_cache_bypass \$http_upgrade;
        proxy_buffering off;
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
    }
}
EOF
ln -sf /etc/nginx/sites-available/databasus /etc/nginx/sites-enabled/databasus
rm -f /etc/nginx/sites-enabled/default
$STD nginx -t
$STD systemctl enable -q --now nginx
$STD systemctl reload nginx
msg_ok "Configured Nginx"

motd_ssh
customize
cleanup_lxc
@@ -64,7 +64,7 @@ $STD sudo -u cool coolconfig set-admin-password --user=admin --password="$COOLPASS"
echo "$COOLPASS" >~/.coolpass
msg_ok "Installed Collabora Online"

fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.2" "/usr/bin" "opencloud-*-linux-amd64"
fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.1.0" "/usr/bin" "opencloud-*-linux-amd64"

msg_info "Configuring OpenCloud"
DATA_DIR="/var/lib/opencloud"

@@ -80,6 +80,7 @@ $STD php artisan p:user:make --no-interaction --admin=1 --email "$ADMIN_EMAIL" -
echo "* * * * * php /opt/pterodactyl-panel/artisan schedule:run >> /dev/null 2>&1" | crontab -u www-data -
chown -R www-data:www-data /opt/pterodactyl-panel/*
chmod -R 755 /opt/pterodactyl-panel/storage/* /opt/pterodactyl-panel/bootstrap/cache/
ln -s /opt/pterodactyl-panel /var/www/pterodactyl
{
echo ""
echo "pterodactyl Admin Username: admin"
@@ -3427,6 +3427,7 @@ start() {
VERBOSE="no"
set_std_mode
ensure_profile_loaded
get_lxc_ip
update_script
update_motd_ip
cleanup_lxc

@@ -3454,6 +3455,7 @@ start() {
;;
esac
ensure_profile_loaded
get_lxc_ip
update_script
update_motd_ip
cleanup_lxc
@@ -624,417 +624,3 @@ EOF
qm set "$VMID" -description "$DESCRIPTION" >/dev/null

}

# ==============================================================================
# SECTION: VM SMART RECOVERY
# ==============================================================================

# Global error log for VM creation — captures stderr from critical commands
VM_ERROR_LOG="${VM_ERROR_LOG:-/tmp/vm-install-$$.log}"
VM_RECOVERY_ATTEMPT=${VM_RECOVERY_ATTEMPT:-0}
VM_MAX_RETRIES=${VM_MAX_RETRIES:-2}

# ------------------------------------------------------------------------------
# vm_log_cmd()
#
# - Wraps a command to capture stderr into VM_ERROR_LOG
# - Passes stdout through normally
# - Returns the original exit code
# Usage: vm_log_cmd qm importdisk "$VMID" "$IMAGE" "$STORAGE"
# ------------------------------------------------------------------------------
vm_log_cmd() {
  "$@" 2>>"$VM_ERROR_LOG"
}

# ------------------------------------------------------------------------------
# is_vm_download_error()
#
# - Detects download failures based on exit code and error log
# - Checks curl exit codes (6, 7, 22, 28, 35, 52, 56) and HTTP error patterns
# - Returns 0 (true) if download error detected, 1 otherwise
# ------------------------------------------------------------------------------
is_vm_download_error() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  # curl-specific exit codes indicating download issues
  case "$exit_code" in
  6 | 7 | 22 | 28 | 35 | 52 | 56) return 0 ;;
  esac

  # Check log for download-related patterns
  if [[ -s "$log_file" ]]; then
    if grep -qiE "curl.*failed|download.*failed|HTTP.*[45][0-9]{2}|Could not resolve|Connection refused|Connection timed out|SSL.*error" "$log_file" 2>/dev/null; then
      return 0
    fi
  fi
  return 1
}

# ------------------------------------------------------------------------------
# is_vm_disk_import_error()
#
# - Detects disk import failures (qm importdisk / qm disk import)
# - Checks for storage allocation and format conversion errors
# - Returns 0 (true) if disk import error detected, 1 otherwise
# ------------------------------------------------------------------------------
is_vm_disk_import_error() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  if [[ -s "$log_file" ]]; then
    if grep -qiE "importdisk.*failed|disk import.*error|storage.*allocation.*failed|qcow2.*error|raw.*error|pvesm.*alloc.*failed|unable to create|volume.*already exists" "$log_file" 2>/dev/null; then
      return 0
    fi
  fi
  return 1
}

# ------------------------------------------------------------------------------
# is_vm_virt_customize_error()
#
# - Detects virt-customize / libguestfs failures
# - Checks for guestfs, supermin, appliance boot errors
# - Returns 0 (true) if virt-customize error detected, 1 otherwise
# ------------------------------------------------------------------------------
is_vm_virt_customize_error() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  if [[ -s "$log_file" ]]; then
    if grep -qiE "virt-customize|libguestfs|guestfs|supermin|appliance.*boot|virt-.*failed|launch.*failed" "$log_file" 2>/dev/null; then
      return 0
    fi
  fi
  return 1
}

# ------------------------------------------------------------------------------
# is_vm_vmid_conflict()
#
# - Detects VMID conflicts (VM already exists)
# - Returns 0 (true) if conflict detected, 1 otherwise
# ------------------------------------------------------------------------------
is_vm_vmid_conflict() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  if [[ -s "$log_file" ]]; then
    if grep -qiE "already exists|VM $VMID already|unable to create VM|VMID.*in use" "$log_file" 2>/dev/null; then
      return 0
    fi
  fi
  return 1
}

# ------------------------------------------------------------------------------
# is_vm_storage_full()
#
# - Detects storage full / space exhaustion errors
# - Returns 0 (true) if storage space issue detected, 1 otherwise
# ------------------------------------------------------------------------------
is_vm_storage_full() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  if [[ -s "$log_file" ]]; then
    if grep -qiE "not enough space|no space left|storage.*full|disk quota|ENOSPC|insufficient.*space|thin pool.*full" "$log_file" 2>/dev/null; then
      return 0
    fi
  fi
  return 1
}

# ------------------------------------------------------------------------------
# is_vm_network_error()
#
# - Detects general network/DNS errors beyond download failures
# - Returns 0 (true) if network issue detected, 1 otherwise
# ------------------------------------------------------------------------------
is_vm_network_error() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  # Network-related curl/wget exit codes
  case "$exit_code" in
  6 | 7 | 28 | 52 | 56) return 0 ;;
  esac

  if [[ -s "$log_file" ]]; then
    if grep -qiE "Name or service not known|Temporary failure in name resolution|Network is unreachable|No route to host|DNS.*failed|could not resolve" "$log_file" 2>/dev/null; then
      return 0
    fi
  fi
  return 1
}

# ------------------------------------------------------------------------------
# vm_classify_error()
#
# - Classifies a VM creation error into a category
# - Order matters: most specific checks first
# - Returns category string via stdout
# - Categories: vmid_conflict, storage_full, download, disk_import,
#   virt_customize, network, unknown
# ------------------------------------------------------------------------------
vm_classify_error() {
  local exit_code="${1:-0}"
  local log_file="${2:-$VM_ERROR_LOG}"

  if is_vm_vmid_conflict "$exit_code" "$log_file"; then
    echo "vmid_conflict"
  elif is_vm_storage_full "$exit_code" "$log_file"; then
    echo "storage_full"
  elif is_vm_download_error "$exit_code" "$log_file"; then
    echo "download"
  elif is_vm_disk_import_error "$exit_code" "$log_file"; then
    echo "disk_import"
  elif is_vm_virt_customize_error "$exit_code" "$log_file"; then
    echo "virt_customize"
  elif is_vm_network_error "$exit_code" "$log_file"; then
    echo "network"
  else
    echo "unknown"
  fi
}

# ------------------------------------------------------------------------------
# vm_show_recovery_menu()
#
# - Displays a whiptail menu with recovery options after a VM creation failure
# - Options are dynamically built based on error category
# - Returns the selected option via stdout
# - Arguments:
#   $1: exit_code
#   $2: error_category (from vm_classify_error)
#   $3: current attempt number
# ------------------------------------------------------------------------------
vm_show_recovery_menu() {
  local exit_code="${1:-1}"
  local error_category="${2:-unknown}"
  local attempt="${3:-1}"

  local menu_items=()
  local menu_height=12
  local item_count=0

  # --- Dynamic options based on error category ---

  # Retry (always available unless max retries reached)
  if ((attempt < VM_MAX_RETRIES)); then
    case "$error_category" in
    download)
      menu_items+=("RETRY_DOWNLOAD" "🔄 Retry download (clear cache & re-download)" "ON")
      ((item_count++))
      ;;
    disk_import)
      menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
      ((item_count++))
      ;;
    virt_customize)
      menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
      ((item_count++))
      menu_items+=("SKIP_CUSTOMIZE" "⏭️ Skip virt-customize (use first-boot fallback)" "OFF")
      ((item_count++))
      ;;
    network)
      menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
      ((item_count++))
      ;;
    vmid_conflict)
      menu_items+=("NEW_VMID" "🆔 Choose a different VM ID" "ON")
      ((item_count++))
      ;;
    storage_full)
      menu_items+=("RETRY_SETTINGS" "⚙️ Retry with different settings (storage/disk)" "ON")
      ((item_count++))
      ;;
    *)
      menu_items+=("RETRY" "🔄 Retry VM creation" "ON")
      ((item_count++))
      ;;
    esac

    # Retry with different resources (always offered)
    menu_items+=("RETRY_SETTINGS" "⚙️ Retry with different settings (RAM/CPU/Disk)" "OFF")
    ((item_count++))
  fi

  # Keep VM for debugging (always available)
  menu_items+=("KEEP" "🔍 Keep partial VM for manual debugging" "OFF")
  ((item_count++))

  # Abort (always available)
  menu_items+=("ABORT" "❌ Destroy VM and exit" "OFF")
  ((item_count++))

  menu_height=$((item_count + 10))

  # Error info for title
  local title="VM CREATION FAILED"
  local body="Exit code: ${exit_code} | Category: ${error_category}\nAttempt: ${attempt}/${VM_MAX_RETRIES}\n\nChoose a recovery action:"

  if ((attempt >= VM_MAX_RETRIES)); then
    body="Exit code: ${exit_code} | Category: ${error_category}\n⚠️ Maximum retries (${VM_MAX_RETRIES}) reached.\n\nChoose an action:"
  fi

  local choice
  choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "$title" \
    --radiolist "$body" "$menu_height" 72 "$item_count" \
    "${menu_items[@]}" 3>&1 1>&2 2>&3) || choice="ABORT"

  echo "$choice"
}

# ------------------------------------------------------------------------------
# vm_handle_recovery()
#
# - Main recovery handler called from error_handler or a wrapper
# - Classifies the error, shows recovery menu, and executes the chosen action
# - Arguments:
#   $1: exit_code
#   $2: line_number
#   $3: failed_command
#   $4: cleanup_fn — function to call for VM cleanup (default: cleanup_vmid)
#   $5: retry_fn — function to re-invoke for full retry (required for retry)
# - Uses global: VM_ERROR_LOG, VM_RECOVERY_ATTEMPT, VM_MAX_RETRIES, VMID
# - Returns: 0 if retry was chosen (caller should re-run), 1 if abort/keep
# ------------------------------------------------------------------------------
vm_handle_recovery() {
  local exit_code="${1:-1}"
  local line_number="${2:-?}"
  local failed_command="${3:-unknown}"
  local cleanup_fn="${4:-cleanup_vmid}"
  local retry_fn="${5:-}"

  # Stop any running spinner
  stop_spinner 2>/dev/null || true

  # Classify the error
  local error_category
  error_category=$(vm_classify_error "$exit_code" "$VM_ERROR_LOG")

  ((VM_RECOVERY_ATTEMPT++))

  # Show error details
  echo ""
  msg_error "VM creation failed in line ${line_number}"
  msg_error "Exit code: ${exit_code} | Category: ${error_category}"
  msg_error "Command: ${failed_command}"

  # Show last few lines of error log if available
  if [[ -s "$VM_ERROR_LOG" ]]; then
    echo -e "\n${TAB}${YW}--- Last 5 lines of error log ---${CL}"
    tail -n 5 "$VM_ERROR_LOG" 2>/dev/null | while IFS= read -r line; do
      echo -e "${TAB} ${line}"
    done
    echo -e "${TAB}${YW}----------------------------------${CL}\n"
  fi

  # Show recovery menu
  local choice
  choice=$(vm_show_recovery_menu "$exit_code" "$error_category" "$VM_RECOVERY_ATTEMPT")

  case "$choice" in
  RETRY | RETRY_DOWNLOAD)
    msg_info "Cleaning up failed VM ${VMID} for retry"
    "$cleanup_fn" 2>/dev/null || true
    rm -f "$VM_ERROR_LOG"
    rm -f "${WORK_FILE:-}" 2>/dev/null

    if [[ "$choice" == "RETRY_DOWNLOAD" ]]; then
      # Clear cached image
      if [[ -n "${CACHE_FILE:-}" && -f "$CACHE_FILE" ]]; then
        msg_info "Clearing cached image: $(basename "$CACHE_FILE")"
        rm -f "$CACHE_FILE"
        msg_ok "Cache cleared"
      fi
    fi

    msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"

    if [[ -n "$retry_fn" ]]; then
      # Re-invoke the retry function — caller loop handles this
      return 0
    else
      msg_warn "No retry function provided — please re-run the script manually"
      return 1
    fi
    ;;

  SKIP_CUSTOMIZE)
    msg_info "Cleaning up failed VM ${VMID} for retry (skipping virt-customize)"
    "$cleanup_fn" 2>/dev/null || true
    rm -f "$VM_ERROR_LOG"
    rm -f "${WORK_FILE:-}" 2>/dev/null
    # Set flag so docker-vm.sh skips virt-customize
    export SKIP_VIRT_CUSTOMIZE="yes"
    msg_ok "Will use first-boot fallback for package installation"

    if [[ -n "$retry_fn" ]]; then
      return 0
    else
      msg_warn "No retry function provided — please re-run the script manually"
      return 1
    fi
    ;;

  RETRY_SETTINGS)
    msg_info "Cleaning up failed VM ${VMID} for retry with new settings"
    "$cleanup_fn" 2>/dev/null || true
    rm -f "$VM_ERROR_LOG"
    rm -f "${WORK_FILE:-}" 2>/dev/null

    # Let user choose new settings via advanced_settings if available
    if declare -f advanced_settings >/dev/null 2>&1; then
      header_info 2>/dev/null || true
      echo -e "${ADVANCED:-}${BOLD}${RD}Reconfigure VM Settings${CL}"
      advanced_settings
    else
      msg_warn "advanced_settings() not available — using current settings"
    fi

    if [[ -n "$retry_fn" ]]; then
      return 0
    else
      msg_warn "No retry function provided — please re-run the script manually"
      return 1
    fi
    ;;

  NEW_VMID)
    msg_info "Cleaning up conflicting VM ${VMID}"
    "$cleanup_fn" 2>/dev/null || true
    rm -f "$VM_ERROR_LOG"
    rm -f "${WORK_FILE:-}" 2>/dev/null
    VMID=$(get_valid_nextid)
    echo -e "${CONTAINERID:-}${BOLD}${DGN}New Virtual Machine ID: ${BGN}${VMID}${CL}"
    msg_ok "Using new VMID: ${VMID}"

    if [[ -n "$retry_fn" ]]; then
      return 0
    else
      msg_warn "No retry function provided — please re-run the script manually"
      return 1
    fi
    ;;

  KEEP)
    msg_warn "Keeping partial VM ${VMID} for manual debugging"
    msg_warn "You can inspect it with: qm config ${VMID}"
    msg_warn "To remove it later: qm destroy ${VMID} --destroy-unreferenced-disks --purge"
    # Report failure to telemetry
    post_update_to_api "failed" "$exit_code" 2>/dev/null || true
    exit "$exit_code"
    ;;

  ABORT | *)
    msg_info "Destroying failed VM ${VMID}"
    "$cleanup_fn" 2>/dev/null || true
    rm -f "$VM_ERROR_LOG"
    post_update_to_api "failed" "$exit_code" 2>/dev/null || true
    msg_error "VM creation aborted by user"
    exit "$exit_code"
    ;;
  esac
}
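As a usage illustration (not part of the diff above): a Generation-2 VM script could wire `vm_handle_recovery()` into its ERR trap roughly like this, assuming the script defines `create_vm()` and `cleanup_vmid()` as the scripts in the following hunks do:

```bash
# Sketch only — create_vm() and cleanup_vmid() are assumed to exist in the caller.
vm_error_handler() {
  local exit_code=$? # capture first, before any other command resets $?
  local line_number="${1:-?}"
  local command="${2:-unknown}"
  trap - ERR # avoid re-entering the handler while the recovery menu is open
  set +e
  if vm_handle_recovery "$exit_code" "$line_number" "$command" cleanup_vmid create_vm; then
    set -e
    trap 'vm_error_handler $LINENO "$BASH_COMMAND"' ERR
    create_vm # the user chose a retry option
  fi
}
trap 'vm_error_handler $LINENO "$BASH_COMMAND"' ERR
```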
@@ -65,63 +65,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

# Smart recovery state
VM_CREATION_PHASE="no"
VM_RECOVERY_ATTEMPT=0
VM_MAX_RETRIES=2

function error_handler() {
  local exit_code="$?"
  local line_number="$1"
  local command="$2"
  local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
  echo -e "\n$error_message\n"

  # During VM creation phase: offer recovery menu instead of immediate cleanup
  if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
    ((VM_RECOVERY_ATTEMPT++))
    trap - ERR
    set +e

    local choice
    choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
      --radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 4 \
      "RETRY" "Retry VM creation" "ON" \
      "SKIP_CUSTOMIZE" "Retry and skip image customization" "OFF" \
      "KEEP" "Keep partial VM for debugging" "OFF" \
      "ABORT" "Destroy VM and exit" "OFF" \
      3>&1 1>&2 2>&3) || choice="ABORT"

    case "$choice" in
    RETRY | SKIP_CUSTOMIZE)
      msg_info "Cleaning up failed VM ${VMID} for retry"
      cleanup_vmid 2>/dev/null || true
      rm -f "${WORK_FILE:-}" 2>/dev/null
      [[ "$choice" == "SKIP_CUSTOMIZE" ]] && export SKIP_VIRT_CUSTOMIZE="yes"
      msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
      set -e
      trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
      create_vm
      exit $?
      ;;
    KEEP)
      echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
      echo -e " Inspect: qm config ${VMID}"
      echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
      post_update_to_api "failed" "$exit_code"
      exit "$exit_code"
      ;;
    *)
      post_update_to_api "failed" "$exit_code"
      cleanup_vmid
      exit "$exit_code"
      ;;
    esac
  fi

  # Default: no recovery (max retries exceeded or outside creation phase)
  post_update_to_api "failed" "${exit_code}"
  echo -e "\n$error_message\n"
  cleanup_vmid
}
@@ -535,130 +485,125 @@ fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
|
||||
create_vm() {
|
||||
# ==============================================================================
|
||||
# PREREQUISITES
|
||||
# ==============================================================================
|
||||
if ! command -v virt-customize &>/dev/null; then
|
||||
msg_info "Installing libguestfs-tools"
|
||||
apt-get update >/dev/null 2>&1
|
||||
apt-get install -y libguestfs-tools >/dev/null 2>&1
|
||||
msg_ok "Installed libguestfs-tools"
|
||||
fi
|
||||
# ==============================================================================
|
||||
# PREREQUISITES
|
||||
# ==============================================================================
|
||||
if ! command -v virt-customize &>/dev/null; then
|
||||
msg_info "Installing libguestfs-tools"
|
||||
apt-get update >/dev/null 2>&1
|
||||
apt-get install -y libguestfs-tools >/dev/null 2>&1
|
||||
msg_ok "Installed libguestfs-tools"
|
||||
fi
|
||||
|
||||
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
|
||||
else
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-nocloud-amd64.qcow2
|
||||
fi
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
|
||||
else
|
||||
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-nocloud-amd64.qcow2
|
||||
fi
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
|
||||
# ==============================================================================
|
# ==============================================================================
# IMAGE CUSTOMIZATION
# ==============================================================================
if [[ "${SKIP_VIRT_CUSTOMIZE:-}" != "yes" ]]; then
  msg_info "Customizing ${FILE} image"
  WORK_FILE=$(mktemp --suffix=.qcow2)
  cp "$FILE" "$WORK_FILE"

  # Set hostname
  virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1

  # Prepare for unique machine-id on first boot
  virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
  virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1

  # Disable systemd-firstboot to prevent interactive prompts blocking the console
  virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true

  # Pre-seed firstboot settings so it won't prompt even if triggered
  virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
  virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true

  if [ "$CLOUD_INIT" == "yes" ]; then
    # Cloud-Init handles SSH and login
    virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
    virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
  else
    # Configure auto-login on serial console (ttyS0) and virtual console (tty1)
    virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
    virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
    virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
    virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
  fi

  msg_ok "Customized image"
else
  msg_ok "Skipped image customization (hostname and login not pre-configured)"
fi
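The block above is the only step that touches the image itself; everything after it is plain qm plumbing. A quick way to spot-check the same customization by hand on a scratch copy is sketched below; the image file name is only a placeholder, not something this script produces.

cp debian-13-nocloud-amd64.qcow2 /tmp/check.qcow2      # placeholder file name
virt-customize -a /tmp/check.qcow2 --hostname testvm
virt-cat -a /tmp/check.qcow2 /etc/hostname             # should print: testvm
virt-cat -a /tmp/check.qcow2 /etc/machine-id           # empty once the truncate step has run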
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
nfs | dir)
  DISK_EXT=".qcow2"
  DISK_REF="$VMID/"
  DISK_IMPORT="-format qcow2"
  THIN=""
  ;;
btrfs)
  DISK_EXT=".raw"
  DISK_REF="$VMID/"
  DISK_IMPORT="-format raw"
  FORMAT=",efitype=4m"
  THIN=""
  ;;
*)
  DISK_EXT=""
  DISK_REF=""
  DISK_IMPORT="-format raw"
  ;;
esac
for i in {0,1}; do
  disk="DISK$i"
  eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
  eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
done
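The eval loop only derives disk names and storage references; nothing is allocated yet. A minimal sketch of what it leaves behind, assuming a hypothetical VMID of 100 on an LVM-thin storage named local-lvm (the *) branch, so no extension and no "$VMID/" prefix):

VMID=100; STORAGE="local-lvm"; DISK_EXT=""; DISK_REF=""   # hypothetical values for illustration
for i in {0,1}; do
  disk="DISK$i"
  eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
  eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
done
echo "$DISK0 -> $DISK0_REF"   # vm-100-disk-0 -> local-lvm:vm-100-disk-0
echo "$DISK1 -> $DISK1_REF"   # vm-100-disk-1 -> local-lvm:vm-100-disk-1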
msg_info "Creating a Debian 13 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-scsi1 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
else
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
fi
|
||||
msg_info "Creating a Debian 13 VM"
|
||||
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
|
||||
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
|
||||
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
|
||||
qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
|
||||
if [ "$CLOUD_INIT" == "yes" ]; then
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-scsi1 ${STORAGE}:cloudinit \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
else
|
||||
qm set $VMID \
|
||||
-efidisk0 ${DISK0_REF}${FORMAT} \
|
||||
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
|
||||
-boot order=scsi0 \
|
||||
-serial0 socket >/dev/null
|
||||
fi
|
||||
|
||||
# Clean up work file
|
||||
rm -f "$WORK_FILE"
|
||||
# Clean up work file
|
||||
rm -f "$WORK_FILE"
|
||||
|
||||
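Once the import and qm set calls above have run, the resulting disk layout can be confirmed from the host. The VMID and the output shown here are examples only:

qm config 100 | grep -E '^(efidisk0|scsi0|scsi1|boot):'
# efidisk0: local-lvm:vm-100-disk-0,efitype=4m,size=4M   (illustrative)
# scsi0: local-lvm:vm-100-disk-1,size=8G
# boot: order=scsi0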
DESCRIPTION=$(
  cat <<EOF
<div align='center'>
  <a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
    <img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
@@ -686,27 +631,22 @@ EOF' >/dev/null 2>&1 || true
  </span>
</div>
EOF
)
qm set $VMID -description "$DESCRIPTION" >/dev/null
if [ -n "$DISK_SIZE" ]; then
  msg_info "Resizing disk to $DISK_SIZE GB"
  qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
else
  msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
  qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
fi
msg_ok "Created a Debian 13 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Debian 13 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Debian 13 VM"
|
||||
fi
|
||||
msg_ok "Created a Debian 13 VM ${CL}${BL}(${HN})"
|
||||
if [ "$START_VM" == "yes" ]; then
|
||||
msg_info "Starting Debian 13 VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started Debian 13 VM"
|
||||
fi
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo "More Info at https://github.com/community-scripts/ProxmoxVE/discussions/836"
|
||||
} # end create_vm
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo "More Info at https://github.com/community-scripts/ProxmoxVE/discussions/836"
|
||||
|
||||
448 vm/docker-vm.sh
@@ -40,32 +40,10 @@ trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

# Flag to control whether recovery menu is shown (set during create_vm)
VM_CREATION_PHASE="no"

function error_handler() {
  local exit_code="$?"
  local line_number="$1"
  local command="$2"

  # During VM creation phase: use smart recovery if available
  if [[ "$VM_CREATION_PHASE" == "yes" ]] && declare -f vm_handle_recovery >/dev/null 2>&1; then
    # Temporarily disable ERR trap + set -e to prevent recursion during recovery menu
    trap - ERR
    set +e

    if vm_handle_recovery "$exit_code" "$line_number" "$command" "cleanup_vmid" "create_vm"; then
      # Recovery chose retry — re-invoke create_vm with traps restored
      set -e
      trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
      create_vm
      exit $?
    fi
    # Recovery chose abort/keep — vm_handle_recovery already called exit
    exit "$exit_code"
  fi

  # Default error handling (outside VM creation phase)
  local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
  post_update_to_api "failed" "${exit_code}"
  echo -e "\n$error_message\n"
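The disable/restore dance around the ERR trap is what keeps the handler from recursing into itself while the retry runs. A self-contained sketch of the same pattern (all names here are made up for illustration, they are not part of the script):

#!/usr/bin/env bash
set -Ee
attempts=0
step() { [ "$attempts" -ge 1 ]; }          # fails the first time, succeeds on the retry
on_err() {
  trap - ERR; set +e                        # keep the handler from re-triggering itself
  echo "failed at line $1, retrying"
  attempts=$((attempts + 1))
  set -e; trap 'on_err $LINENO' ERR         # restore strict mode before re-running
  step
  exit $?
}
trap 'on_err $LINENO' ERR
step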
@@ -459,87 +437,74 @@ if ! command -v virt-customize &>/dev/null; then
fi

# ==============================================================================
# VM CREATION FUNCTION (wrapped for smart recovery retry)
# ==============================================================================
create_vm() {
# ==============================================================================
# IMAGE DOWNLOAD
# ==============================================================================
msg_info "Retrieving the URL for the ${OS_DISPLAY} Qcow2 Disk Image"
URL=$(get_image_url)
CACHE_DIR="/var/lib/vz/template/cache"
CACHE_FILE="$CACHE_DIR/$(basename "$URL")"
mkdir -p "$CACHE_DIR"
msg_ok "${CL}${BL}${URL}${CL}"

# Reset error log for this attempt
VM_ERROR_LOG="/tmp/vm-install-${VMID}.log"
: >"$VM_ERROR_LOG"

if [[ ! -s "$CACHE_FILE" ]]; then
  curl -f#SL -o "$CACHE_FILE" "$URL"
  echo -en "\e[1A\e[0K"
  msg_ok "Downloaded ${CL}${BL}$(basename "$CACHE_FILE")${CL}"
else
  msg_ok "Using cached image ${CL}${BL}$(basename "$CACHE_FILE")${CL}"
fi

# ==============================================================================
# STORAGE TYPE DETECTION
# ==============================================================================
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
nfs | dir)
  DISK_EXT=".qcow2"
  DISK_REF="$VMID/"
  DISK_IMPORT="--format qcow2"
  THIN=""
  ;;
btrfs)
  DISK_EXT=".raw"
  DISK_REF="$VMID/"
  DISK_IMPORT="--format raw"
  FORMAT=",efitype=4m"
  THIN=""
  ;;
*)
  DISK_EXT=""
  DISK_REF=""
  DISK_IMPORT="--format raw"
  ;;
esac
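The case statement above branches purely on the second column of pvesm status output. The output shape below is illustrative, but the awk call reads exactly that field:

pvesm status -storage local-lvm
# Name           Type     Status     Total        Used        Available    %
# local-lvm      lvmthin  active     100663296    25165824    75497472     25.00%
pvesm status -storage local-lvm | awk 'NR>1 {print $2}'   # -> lvmthin, which lands in the *) branch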
# ==============================================================================
# IMAGE CUSTOMIZATION WITH DOCKER
# ==============================================================================
msg_info "Preparing ${OS_DISPLAY} image with Docker"
export LIBGUESTFS_BACKEND_SETTINGS=dns=8.8.8.8,1.1.1.1

WORK_FILE=$(mktemp --suffix=.qcow2)
cp "$CACHE_FILE" "$WORK_FILE"
DOCKER_PREINSTALLED="no"

# Install qemu-guest-agent and Docker during image customization
# Skip if recovery set SKIP_VIRT_CUSTOMIZE (virt-customize failed before)
if [[ "${SKIP_VIRT_CUSTOMIZE:-}" == "yes" ]]; then
  msg_ok "Skipping virt-customize (using first-boot fallback)"
else
  msg_info "Installing base packages in image"
  if virt-customize -a "$WORK_FILE" --install qemu-guest-agent,curl,ca-certificates 2>>"$VM_ERROR_LOG" >/dev/null; then
    msg_ok "Installed base packages"

    msg_info "Installing Docker (this may take 2-5 minutes)"
    if virt-customize -q -a "$WORK_FILE" --run-command "curl -fsSL https://get.docker.com | sh" >/dev/null 2>&1 &&
      virt-customize -q -a "$WORK_FILE" --run-command "systemctl enable docker" >/dev/null 2>&1; then
      msg_ok "Installed Docker"

      msg_info "Configuring Docker daemon"
      # Optimize Docker daemon configuration
      virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/docker" >/dev/null 2>&1
      virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/docker/daemon.json << EOF
{
  "storage-driver": "overlay2",
  "log-driver": "json-file",
@@ -549,46 +514,45 @@ create_vm() {
  }
}
EOF' >/dev/null 2>&1
      DOCKER_PREINSTALLED="yes"
      msg_ok "Configured Docker daemon"
    else
      msg_ok "Docker will be installed on first boot"
    fi
  else
    msg_ok "Packages will be installed on first boot"
  fi
fi

msg_info "Finalizing image (hostname, SSH config)"
# Set hostname and prepare for unique machine-id
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1

# Configure SSH for Cloud-Init
if [ "$USE_CLOUD_INIT" = "yes" ]; then
  virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
  virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
else
  # Configure auto-login for nocloud images (no Cloud-Init)
  virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
  virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
fi
msg_ok "Finalized image"

# Create first-boot Docker install script (fallback if virt-customize failed)
if [ "$DOCKER_PREINSTALLED" = "no" ]; then
  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /root/install-docker.sh << "DOCKERSCRIPT"
#!/bin/bash
exec > /var/log/install-docker.log 2>&1
echo "[$(date)] Starting Docker installation"
@@ -619,7 +583,7 @@ echo "[$(date)] Docker installation completed"
DOCKERSCRIPT
chmod +x /root/install-docker.sh' >/dev/null 2>&1

  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/install-docker.service << "DOCKERSERVICE"
[Unit]
Description=Install Docker on First Boot
After=network-online.target
@@ -635,123 +599,113 @@ RemainAfterExit=yes
WantedBy=multi-user.target
DOCKERSERVICE
systemctl enable install-docker.service' >/dev/null 2>&1
fi

# Resize disk to target size
msg_info "Resizing disk image to ${DISK_SIZE}"
qemu-img resize "$WORK_FILE" "${DISK_SIZE}" >/dev/null 2>&1
msg_ok "Resized disk image"

# ==============================================================================
# VM CREATION
# ==============================================================================
msg_info "Creating Docker VM shell"

qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
  -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci 2>>"$VM_ERROR_LOG" >/dev/null

msg_ok "Created VM shell"

# ==============================================================================
# DISK IMPORT
# ==============================================================================
msg_info "Importing disk into storage ($STORAGE)"

if qm disk import --help >/dev/null 2>&1; then
  IMPORT_CMD=(qm disk import)
else
  IMPORT_CMD=(qm importdisk)
fi

IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$WORK_FILE" "$STORAGE" ${DISK_IMPORT:-} 2> >(tee -a "$VM_ERROR_LOG") || true)"
DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
[[ -z "$DISK_REF_IMPORTED" ]] && {
  msg_error "Unable to determine imported disk reference."
  echo "$IMPORT_OUT"
  exit 1
}

msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"

# Clean up work file
rm -f "$WORK_FILE"

# ==============================================================================
# VM CONFIGURATION
# ==============================================================================
msg_info "Attaching EFI and root disk"

qm set "$VMID" \
  --efidisk0 "${STORAGE}:0,efitype=4m" \
  --scsi0 "${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,}" \
  --boot order=scsi0 \
  --serial0 socket >/dev/null

qm set $VMID --agent enabled=1 >/dev/null

msg_ok "Attached EFI and root disk"

# Set VM description
set_description

# Cloud-Init configuration
if [ "$USE_CLOUD_INIT" = "yes" ]; then
  msg_info "Configuring Cloud-Init"
  setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes"
  msg_ok "Cloud-Init configured"
fi

# Start VM
if [ "$START_VM" == "yes" ]; then
  msg_info "Starting Docker VM"
  qm start $VMID >/dev/null 2>&1
  msg_ok "Started Docker VM"
fi

# ==============================================================================
# FINAL OUTPUT
# ==============================================================================
VM_IP=""
if [ "$START_VM" == "yes" ]; then
  set +e
  for i in {1..10}; do
    VM_IP=$(qm guest cmd "$VMID" network-get-interfaces 2>/dev/null |
      jq -r '.[] | select(.name != "lo") | ."ip-addresses"[]? | select(."ip-address-type" == "ipv4") | ."ip-address"' 2>/dev/null |
      grep -v "^127\." | head -1) || true
    [ -n "$VM_IP" ] && break
    sleep 3
  done
  set -e
fi

echo -e "\n${INFO}${BOLD}${GN}Docker VM Configuration Summary:${CL}"
echo -e "${TAB}${DGN}VM ID: ${BGN}${VMID}${CL}"
echo -e "${TAB}${DGN}Hostname: ${BGN}${HN}${CL}"
echo -e "${TAB}${DGN}OS: ${BGN}${OS_DISPLAY}${CL}"
[ -n "$VM_IP" ] && echo -e "${TAB}${DGN}IP Address: ${BGN}${VM_IP}${CL}"

if [ "$DOCKER_PREINSTALLED" = "yes" ]; then
  echo -e "${TAB}${DGN}Docker: ${BGN}Pre-installed (via get.docker.com)${CL}"
else
  echo -e "${TAB}${DGN}Docker: ${BGN}Installing on first boot${CL}"
  echo -e "${TAB}${YW}⚠️ Wait 2-3 minutes for installation to complete${CL}"
  echo -e "${TAB}${YW}⚠️ Check progress: ${BL}cat /var/log/install-docker.log${CL}"
fi

if [ "$USE_CLOUD_INIT" = "yes" ]; then
  display_cloud_init_info "$VMID" "$HN" 2>/dev/null || true
fi

post_update_to_api "done" "none"
msg_ok "Completed successfully!\n"

} # end of create_vm()

# ==============================================================================
# VM CREATION WITH SMART RECOVERY
# ==============================================================================
VM_CREATION_PHASE="yes"
create_vm
VM_CREATION_PHASE="no"
rm -f "$VM_ERROR_LOG" 2>/dev/null || true
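The IP probe above only returns something once the QEMU guest agent is running inside the VM. A single manual round-trip of the same query, with VMID 100 as an example, looks like this:

qm guest cmd 100 network-get-interfaces |
  jq -r '.[] | select(.name != "lo") | ."ip-addresses"[]? | select(."ip-address-type" == "ipv4") | ."ip-address"' |
  grep -v '^127\.' | head -1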
190 vm/haos-vm.sh
@@ -69,65 +69,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

# Smart recovery state
VM_CREATION_PHASE="no"
VM_RECOVERY_ATTEMPT=0
VM_MAX_RETRIES=2

function error_handler() {
  local exit_code="$?"
  local line_number="$1"
  local command="$2"
  local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
  echo -e "\n$error_message\n"

  # During VM creation phase: offer recovery menu instead of immediate cleanup
  if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
    ((VM_RECOVERY_ATTEMPT++))
    trap - ERR
    set +e

    local choice
    choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
      --radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 4 \
      "RETRY" "Retry VM creation" "ON" \
      "RETRY_DOWNLOAD" "Retry with fresh download (clear cache)" "OFF" \
      "KEEP" "Keep partial VM for debugging" "OFF" \
      "ABORT" "Destroy VM and exit" "OFF" \
      3>&1 1>&2 2>&3) || choice="ABORT"

    case "$choice" in
    RETRY | RETRY_DOWNLOAD)
      msg_info "Cleaning up failed VM ${VMID} for retry"
      cleanup_vmid 2>/dev/null || true
      if [[ "$choice" == "RETRY_DOWNLOAD" && -n "${CACHE_FILE:-}" ]]; then
        rm -f "$CACHE_FILE"
        msg_ok "Cleared cached image"
      fi
      msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
      set -e
      trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
      create_vm
      exit $?
      ;;
    KEEP)
      echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
      echo -e " Inspect: qm config ${VMID}"
      echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
      post_update_to_api "failed" "$exit_code"
      exit "$exit_code"
      ;;
    *)
      post_update_to_api "failed" "$exit_code"
      cleanup_vmid
      exit "$exit_code"
      ;;
    esac
  fi

  # Default: no recovery (max retries exceeded or outside creation phase)
  post_update_to_api "failed" "${exit_code}"
  echo -e "\n$error_message\n"
  cleanup_vmid
}
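The 3>&1 1>&2 2>&3 shuffle in the call above is what lets the script capture the radiolist selection on stdout while whiptail keeps drawing its dialog on the terminal. A stripped-down sketch of the same call:

choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Demo" \
  --radiolist "Pick a recovery action:" 12 60 2 \
  "RETRY" "Try again" ON \
  "ABORT" "Give up" OFF \
  3>&1 1>&2 2>&3) || choice="ABORT"
echo "selected: $choice"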
@@ -606,65 +554,64 @@ fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."

create_vm() {
var_version="${BRANCH}"
msg_info "Retrieving the URL for Home Assistant ${BRANCH} Disk Image"
if [ "$BRANCH" == "$dev" ]; then
  URL="https://os-artifacts.home-assistant.io/${BRANCH}/haos_ova-${BRANCH}.qcow2.xz"
else
  URL="https://github.com/home-assistant/operating-system/releases/download/${BRANCH}/haos_ova-${BRANCH}.qcow2.xz"
fi

CACHE_DIR="/var/lib/vz/template/cache"
CACHE_FILE="$CACHE_DIR/$(basename "$URL")"
FILE_IMG="/var/lib/vz/template/tmp/${CACHE_FILE##*/%.xz}" # .qcow2

mkdir -p "$CACHE_DIR" "$(dirname "$FILE_IMG")"
msg_ok "${CL}${BL}${URL}${CL}"

download_and_validate_xz "$URL" "$CACHE_FILE"

msg_info "Creating Home Assistant OS VM shell"
qm create $VMID -machine q35 -bios ovmf -agent 1 -tablet 0 -localtime 1 ${CPU_TYPE} \
  -cores "$CORE_COUNT" -memory "$RAM_SIZE" -name "$HN" -tags community-script \
  -net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 -scsihw virtio-scsi-pci >/dev/null
msg_ok "Created VM shell"

extract_xz_with_pv "$CACHE_FILE" "$FILE_IMG"

msg_info "Importing disk into storage ($STORAGE)"
if qm disk import --help >/dev/null 2>&1; then
  IMPORT_CMD=(qm disk import)
else
  IMPORT_CMD=(qm importdisk)
fi
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "$FILE_IMG" "$STORAGE" --format raw 2>&1 || true)"
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
[[ -z "$DISK_REF" ]] && DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
[[ -z "$DISK_REF" ]] && {
  msg_error "Unable to determine imported disk reference."
  echo "$IMPORT_OUT"
  exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF}${CL})"

rm -f "$FILE_IMG"

msg_info "Attaching EFI and root disk"
qm set $VMID \
  --efidisk0 ${STORAGE}:0,efitype=4m \
  --scsi0 ${DISK_REF},ssd=1,discard=on \
  --boot order=scsi0 \
  --serial0 socket >/dev/null
qm set $VMID --agent enabled=1 >/dev/null
msg_ok "Attached EFI and root disk"

msg_info "Resizing disk to $DISK_SIZE"
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
msg_ok "Resized disk"
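download_and_validate_xz and extract_xz_with_pv are helpers defined elsewhere in the repo and their internals are not part of this diff. A rough manual equivalent of what they accomplish, shown only as a sketch with an example file name, would be:

xz -t haos_ova-16.0.qcow2.xz                                 # integrity check of the cached archive
pv haos_ova-16.0.qcow2.xz | xz -dc > haos_ova-16.0.qcow2     # decompress with a progress bar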
DESCRIPTION=$(
  cat <<EOF
<div align='center'>
  <a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
    <img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
@@ -692,27 +639,22 @@ create_vm() {
  </span>
</div>
EOF
)
qm set $VMID -description "$DESCRIPTION" >/dev/null
msg_ok "Created Homeassistant OS VM ${CL}${BL}(${HN})"

if whiptail --backtitle "Proxmox VE Helper Scripts" --title "Image Cache" \
  --yesno "Keep downloaded Home Assistant OS image for future VMs?\n\nFile: $CACHE_FILE" 10 70; then
  msg_ok "Keeping cached image"
else
  rm -f "$CACHE_FILE"
  msg_ok "Deleted cached image"
fi

if [ "$START_VM" == "yes" ]; then
  msg_info "Starting Home Assistant OS VM"
  qm start $VMID
  msg_ok "Started Home Assistant OS VM"
fi
post_update_to_api "done" "none"
msg_ok "Completed successfully!\n"
} # end create_vm

VM_CREATION_PHASE="yes"
create_vm
VM_CREATION_PHASE="no"
249 vm/openwrt-vm.sh
@@ -70,61 +70,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
set +o pipefail
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -Eeo pipefail
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -568,59 +520,57 @@ else
|
||||
fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
msg_info "Getting URL for OpenWrt Disk Image"
|
||||
|
||||
create_vm() {
|
||||
msg_info "Getting URL for OpenWrt Disk Image"
|
||||
response=$(curl -fsSL https://openwrt.org)
|
||||
stableversion=$(echo "$response" | sed -n 's/.*Current stable release - OpenWrt \([0-9.]\+\).*/\1/p' | head -n 1)
|
||||
URL="https://downloads.openwrt.org/releases/$stableversion/targets/x86/64/openwrt-$stableversion-x86-64-generic-ext4-combined.img.gz"
|
||||
|
||||
response=$(curl -fsSL https://openwrt.org)
|
||||
stableversion=$(echo "$response" | sed -n 's/.*Current stable release - OpenWrt \([0-9.]\+\).*/\1/p' | head -n 1)
|
||||
URL="https://downloads.openwrt.org/releases/$stableversion/targets/x86/64/openwrt-$stableversion-x86-64-generic-ext4-combined.img.gz"
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
FILE=$(basename "$URL")
|
||||
msg_ok "Downloaded ${CL}${BL}$FILE${CL}"
|
||||
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
FILE=$(basename "$URL")
|
||||
msg_ok "Downloaded ${CL}${BL}$FILE${CL}"
|
||||
gunzip -f "$FILE" >/dev/null 2>&1 || true
|
||||
FILE="${FILE%.*}"
|
||||
msg_ok "Extracted OpenWrt Disk Image ${CL}${BL}$FILE${CL}"
|
||||
|
||||
gunzip -f "$FILE" >/dev/null 2>&1 || true
|
||||
FILE="${FILE%.*}"
|
||||
msg_ok "Extracted OpenWrt Disk Image ${CL}${BL}$FILE${CL}"
|
||||
msg_info "Creating OpenWrt VM"
|
||||
qm create $VMID -cores $CORE_COUNT -memory $RAM_SIZE -name $HN \
|
||||
-onboot 1 -ostype l26 -scsihw virtio-scsi-pci --tablet 0
|
||||
if [[ "$(pvesm status | awk -v s=$STORAGE '$1==s {print $2}')" == "dir" ]]; then
|
||||
qm set $VMID -efidisk0 ${STORAGE}:0,efitype=4m,size=4M
|
||||
else
|
||||
pvesm alloc $STORAGE $VMID vm-$VMID-disk-0 4M >/dev/null
|
||||
qm set $VMID -efidisk0 ${STORAGE}:vm-$VMID-disk-0,efitype=4m,size=4M
|
||||
fi
|
||||
|
||||
msg_info "Creating OpenWrt VM"
|
||||
qm create $VMID -cores $CORE_COUNT -memory $RAM_SIZE -name $HN \
|
||||
-onboot 1 -ostype l26 -scsihw virtio-scsi-pci --tablet 0
|
||||
if [[ "$(pvesm status | awk -v s=$STORAGE '$1==s {print $2}')" == "dir" ]]; then
|
||||
qm set $VMID -efidisk0 ${STORAGE}:0,efitype=4m,size=4M
|
||||
else
|
||||
pvesm alloc $STORAGE $VMID vm-$VMID-disk-0 4M >/dev/null
|
||||
qm set $VMID -efidisk0 ${STORAGE}:vm-$VMID-disk-0,efitype=4m,size=4M
|
||||
fi
|
||||
IMPORT_OUT="$(qm importdisk $VMID $FILE $STORAGE --format raw 2>&1 || true)"
|
||||
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p")"
|
||||
|
||||
IMPORT_OUT="$(qm importdisk $VMID $FILE $STORAGE --format raw 2>&1 || true)"
|
||||
DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p")"
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$1 ~ ("vm-"id"-disk-") {print $1}' | sort | tail -n1)"
|
||||
fi
|
||||
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$1 ~ ("vm-"id"-disk-") {print $1}' | sort | tail -n1)"
|
||||
fi
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "$DISK_REF" ]]; then
|
||||
msg_error "Unable to determine imported disk reference."
|
||||
echo "$IMPORT_OUT"
|
||||
exit 1
|
||||
fi
|
||||
qm set $VMID \
|
||||
-efidisk0 ${STORAGE}:0,efitype=4m,size=4M \
|
||||
-scsi0 ${DISK_REF} \
|
||||
-boot order=scsi0 \
|
||||
-tags community-script >/dev/null
|
||||
msg_ok "Attached disk"
|
||||
|
||||
qm set $VMID \
|
||||
-efidisk0 ${STORAGE}:0,efitype=4m,size=4M \
|
||||
-scsi0 ${DISK_REF} \
|
||||
-boot order=scsi0 \
|
||||
-tags community-script >/dev/null
|
||||
msg_ok "Attached disk"
|
||||
msg_info "Resizing disk to ${DISK_SIZE}"
|
||||
qm disk resize "$VMID" scsi0 "${DISK_SIZE}" >/dev/null
|
||||
msg_ok "Resized disk to ${DISK_SIZE}"
|
||||
|
||||
msg_info "Resizing disk to ${DISK_SIZE}"
|
||||
qm disk resize "$VMID" scsi0 "${DISK_SIZE}" >/dev/null
|
||||
msg_ok "Resized disk to ${DISK_SIZE}"
|
||||
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
DESCRIPTION=$(
|
||||
cat <<EOF
|
||||
<div align='center'>
|
||||
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
|
||||
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
|
||||
@@ -648,71 +598,66 @@ create_vm() {
|
||||
</span>
|
||||
</div>
|
||||
EOF
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
|
||||
msg_ok "Created OpenWrt VM ${CL}${BL}(${HN})"
|
||||
msg_info "OpenWrt is being started in order to configure the network interfaces."
|
||||
qm start $VMID
|
||||
sleep 15
|
||||
msg_info "Waiting for OpenWrt to boot..."
|
||||
for i in {1..30}; do
|
||||
if qm status "$VMID" | grep -q "running"; then
|
||||
sleep 5
|
||||
msg_ok "OpenWrt is running"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
msg_ok "Network interfaces are being configured as OpenWrt initiates."
|
||||
)
|
||||
qm set $VMID -description "$DESCRIPTION" >/dev/null
|
||||
|
||||
msg_ok "Created OpenWrt VM ${CL}${BL}(${HN})"
|
||||
msg_info "OpenWrt is being started in order to configure the network interfaces."
|
||||
qm start $VMID
|
||||
sleep 15
|
||||
msg_info "Waiting for OpenWrt to boot..."
|
||||
for i in {1..30}; do
|
||||
if qm status "$VMID" | grep -q "running"; then
|
||||
send_line_to_vm ""
|
||||
send_line_to_vm "uci delete network.@device[0]"
|
||||
send_line_to_vm "uci set network.wan=interface"
|
||||
send_line_to_vm "uci set network.wan.device=eth1"
|
||||
send_line_to_vm "uci set network.wan.proto=dhcp"
|
||||
send_line_to_vm "uci delete network.lan"
|
||||
send_line_to_vm "uci set network.lan=interface"
|
||||
send_line_to_vm "uci set network.lan.device=eth0"
|
||||
send_line_to_vm "uci set network.lan.proto=static"
|
||||
send_line_to_vm "uci set network.lan.ipaddr=${LAN_IP_ADDR}"
|
||||
send_line_to_vm "uci set network.lan.netmask=${LAN_NETMASK}"
|
||||
send_line_to_vm "uci commit"
|
||||
send_line_to_vm "halt"
|
||||
msg_ok "Network interfaces configured in OpenWrt"
|
||||
else
|
||||
msg_error "VM is not running"
|
||||
exit 1
|
||||
sleep 5
|
||||
msg_ok "OpenWrt is running"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
msg_info "Waiting for OpenWrt to shut down..."
|
||||
until qm status "$VMID" | grep -q "stopped"; do
|
||||
sleep 2
|
||||
done
|
||||
msg_ok "OpenWrt has shut down"
|
||||
msg_ok "Network interfaces are being configured as OpenWrt initiates."
|
||||
|
||||
msg_info "Adding bridge interfaces on Proxmox side"
|
||||
qm set $VMID \
|
||||
-net0 virtio,bridge=${LAN_BRG},macaddr=${LAN_MAC}${LAN_VLAN}${MTU} \
|
||||
-net1 virtio,bridge=${BRG},macaddr=${MAC}${VLAN}${MTU} >/dev/null
|
||||
msg_ok "Bridge interfaces added"
|
||||
if qm status "$VMID" | grep -q "running"; then
|
||||
send_line_to_vm ""
|
||||
send_line_to_vm "uci delete network.@device[0]"
|
||||
send_line_to_vm "uci set network.wan=interface"
|
||||
send_line_to_vm "uci set network.wan.device=eth1"
|
||||
send_line_to_vm "uci set network.wan.proto=dhcp"
|
||||
send_line_to_vm "uci delete network.lan"
|
||||
send_line_to_vm "uci set network.lan=interface"
|
||||
send_line_to_vm "uci set network.lan.device=eth0"
|
||||
send_line_to_vm "uci set network.lan.proto=static"
|
||||
send_line_to_vm "uci set network.lan.ipaddr=${LAN_IP_ADDR}"
|
||||
send_line_to_vm "uci set network.lan.netmask=${LAN_NETMASK}"
|
||||
send_line_to_vm "uci commit"
|
||||
send_line_to_vm "halt"
|
||||
msg_ok "Network interfaces configured in OpenWrt"
|
||||
else
|
||||
msg_error "VM is not running"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$START_VM" = "yes" ]; then
|
||||
msg_info "Starting OpenWrt VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started OpenWrt VM"
|
||||
fi
|
||||
msg_info "Waiting for OpenWrt to shut down..."
|
||||
until qm status "$VMID" | grep -q "stopped"; do
|
||||
sleep 2
|
||||
done
|
||||
msg_ok "OpenWrt has shut down"
|
||||
|
||||
VLAN_FINISH=""
|
||||
if [ -z "$VLAN" ] && [ "$VLAN2" != "999" ]; then
|
||||
VLAN_FINISH=" Please remember to adjust the VLAN tags to suit your network."
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed Successfully!${VLAN_FINISH:+\n$VLAN_FINISH}"
|
||||
} # end create_vm
|
||||
msg_info "Adding bridge interfaces on Proxmox side"
|
||||
qm set $VMID \
|
||||
-net0 virtio,bridge=${LAN_BRG},macaddr=${LAN_MAC}${LAN_VLAN}${MTU} \
|
||||
-net1 virtio,bridge=${BRG},macaddr=${MAC}${VLAN}${MTU} >/dev/null
|
||||
msg_ok "Bridge interfaces added"
|
||||
|
||||
VM_CREATION_PHASE="yes"
|
||||
create_vm
|
||||
VM_CREATION_PHASE="no"
|
||||
if [ "$START_VM" = "yes" ]; then
|
||||
msg_info "Starting OpenWrt VM"
|
||||
qm start $VMID
|
||||
msg_ok "Started OpenWrt VM"
|
||||
fi
|
||||
|
||||
VLAN_FINISH=""
|
||||
if [ -z "$VLAN" ] && [ "$VLAN2" != "999" ]; then
|
||||
VLAN_FINISH=" Please remember to adjust the VLAN tags to suit your network."
|
||||
fi
|
||||
post_update_to_api "done" "none"
|
||||
msg_ok "Completed Successfully!${VLAN_FINISH:+\n$VLAN_FINISH}"
|
||||
|
||||
@@ -62,60 +62,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap cleanup EXIT
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
|
||||
# Smart recovery state
|
||||
VM_CREATION_PHASE="no"
|
||||
VM_RECOVERY_ATTEMPT=0
|
||||
VM_MAX_RETRIES=2
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message\n"
|
||||
|
||||
# During VM creation phase: offer recovery menu instead of immediate cleanup
|
||||
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
|
||||
((VM_RECOVERY_ATTEMPT++))
|
||||
trap - ERR
|
||||
set +e
|
||||
|
||||
local choice
|
||||
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
|
||||
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
|
||||
"RETRY" "Retry VM creation" "ON" \
|
||||
"KEEP" "Keep partial VM for debugging" "OFF" \
|
||||
"ABORT" "Destroy VM and exit" "OFF" \
|
||||
3>&1 1>&2 2>&3) || choice="ABORT"
|
||||
|
||||
case "$choice" in
|
||||
RETRY)
|
||||
msg_info "Cleaning up failed VM ${VMID} for retry"
|
||||
cleanup_vmid 2>/dev/null || true
|
||||
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
|
||||
set -e
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
create_vm
|
||||
exit $?
|
||||
;;
|
||||
KEEP)
|
||||
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
|
||||
echo -e " Inspect: qm config ${VMID}"
|
||||
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
exit "$exit_code"
|
||||
;;
|
||||
*)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
exit "$exit_code"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Default: no recovery (max retries exceeded or outside creation phase)
|
||||
post_update_to_api "failed" "$exit_code"
|
||||
cleanup_vmid
|
||||
}
|
||||
|
||||
@@ -513,57 +466,55 @@ else
|
||||
fi
|
||||
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
|
||||
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
|
||||
msg_info "Retrieving the URL for the Ubuntu 22.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
|
||||
create_vm() {
|
||||
msg_info "Retrieving the URL for the Ubuntu 22.04 Disk Image"
|
||||
URL=https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
|
||||
sleep 2
|
||||
msg_ok "${CL}${BL}${URL}${CL}"
|
||||
curl -f#SL -o "$(basename "$URL")" "$URL"
|
||||
echo -en "\e[1A\e[0K"
|
||||
FILE=$(basename $URL)
|
||||
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
|
||||
case $STORAGE_TYPE in
|
||||
nfs | dir | cifs)
|
||||
DISK_EXT=".qcow2"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format qcow2"
|
||||
THIN=""
|
||||
;;
|
||||
btrfs)
|
||||
DISK_EXT=".raw"
|
||||
DISK_REF="$VMID/"
|
||||
DISK_IMPORT="-format raw"
|
||||
FORMAT=",efitype=4m"
|
||||
THIN=""
|
||||
;;
|
||||
*)
|
||||
DISK_EXT=""
|
||||
DISK_REF=""
|
||||
DISK_IMPORT="-format raw"
|
||||
;;
|
||||
esac
|
||||
for i in {0,1}; do
|
||||
disk="DISK$i"
|
||||
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
|
||||
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
|
||||
done
|
||||
|
||||
msg_info "Creating a Ubuntu 22.04 VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
qm set $VMID \
-efidisk0 ${DISK0_REF}${FORMAT} \
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
-ide2 ${STORAGE}:cloudinit \
-boot order=scsi0 \
-serial0 socket >/dev/null
DESCRIPTION=$(
cat <<EOF
<div align='center'>
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>

@@ -591,28 +542,23 @@ create_vm() {
</span>
</div>
EOF
)
qm set $VMID -description "$DESCRIPTION" >/dev/null
if [ -n "$DISK_SIZE" ]; then
msg_info "Resizing disk to $DISK_SIZE GB"
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
else
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
fi

msg_ok "Created a Ubuntu 22.04 VM ${CL}${BL}(${HN})"
if [ "$START_VM" == "yes" ]; then
msg_info "Starting Ubuntu 22.04 VM"
qm start $VMID
msg_ok "Started Ubuntu 22.04 VM"
fi
post_update_to_api "done" "none"
msg_ok "Completed successfully!\n"
echo -e "Setup Cloud-Init before starting \n
More info at https://github.com/community-scripts/ProxmoxVE/discussions/272 \n"
} # end create_vm

VM_CREATION_PHASE="yes"
create_vm
VM_CREATION_PHASE="no"

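# VM_CREATION_PHASE is flipped around create_vm so the ERR handler only offers the retry/keep/abort menu while the VM is being built.
# Cloud-Init still needs credentials before first boot. A minimal sketch run on the Proxmox host (user, password and key path are placeholders, not part of this script):
#   qm set <VMID> --ciuser ubuntu --cipassword '<password>' --ipconfig0 ip=dhcp
#   qm set <VMID> --sshkeys ~/.ssh/id_rsa.pub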
@@ -65,60 +65,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

# Smart recovery state
VM_CREATION_PHASE="no"
VM_RECOVERY_ATTEMPT=0
VM_MAX_RETRIES=2

function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"

# During VM creation phase: offer recovery menu instead of immediate cleanup
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
((VM_RECOVERY_ATTEMPT++))
trap - ERR
set +e

local choice
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
"RETRY" "Retry VM creation" "ON" \
"KEEP" "Keep partial VM for debugging" "OFF" \
"ABORT" "Destroy VM and exit" "OFF" \
3>&1 1>&2 2>&3) || choice="ABORT"
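# whiptail writes the selected tag to stderr, so the 3>&1 1>&2 2>&3 swap is what lets the command substitution capture it; Esc/cancel falls through to ABORT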

case "$choice" in
RETRY)
msg_info "Cleaning up failed VM ${VMID} for retry"
cleanup_vmid 2>/dev/null || true
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
create_vm
exit $?
;;
KEEP)
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
echo -e " Inspect: qm config ${VMID}"
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
post_update_to_api "failed" "$exit_code"
exit "$exit_code"
;;
*)
post_update_to_api "failed" "$exit_code"
cleanup_vmid
exit "$exit_code"
;;
esac
fi

# Default: no recovery (max retries exceeded or outside creation phase)
post_update_to_api "failed" "$exit_code"
cleanup_vmid
}

@@ -515,57 +468,55 @@ else
fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."

create_vm() {
msg_info "Retrieving the URL for the Ubuntu 24.04 Disk Image"
URL=https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
sleep 2
msg_ok "${CL}${BL}${URL}${CL}"
curl -f#SL -o "$(basename "$URL")" "$URL"
echo -en "\e[1A\e[0K"
FILE=$(basename $URL)
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
nfs | dir | cifs)
DISK_EXT=".qcow2"
DISK_REF="$VMID/"
DISK_IMPORT="-format qcow2"
THIN=""
;;
btrfs)
DISK_EXT=".raw"
DISK_REF="$VMID/"
DISK_IMPORT="-format raw"
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
done

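# Creation sequence: create the VM shell, allocate a small EFI vars volume, import the cloud image, then attach the EFI disk, OS disk, cloud-init drive, boot order and serial console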
msg_info "Creating a Ubuntu 24.04 VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
qm set $VMID \
-efidisk0 ${DISK0_REF}${FORMAT} \
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
-ide2 ${STORAGE}:cloudinit \
-boot order=scsi0 \
-serial0 socket >/dev/null
DESCRIPTION=$(
cat <<EOF
<div align='center'>
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>

@@ -593,28 +544,23 @@ create_vm() {
</span>
</div>
EOF
)
qm set $VMID -description "$DESCRIPTION" >/dev/null
if [ -n "$DISK_SIZE" ]; then
msg_info "Resizing disk to $DISK_SIZE GB"
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
else
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
fi

msg_ok "Created a Ubuntu 24.04 VM ${CL}${BL}(${HN})"
if [ "$START_VM" == "yes" ]; then
msg_info "Starting Ubuntu 24.04 VM"
qm start $VMID
msg_ok "Started Ubuntu 24.04 VM"
fi
post_update_to_api "done" "none"
msg_ok "Completed successfully!\n"
echo -e "Setup Cloud-Init before starting \n
More info at https://github.com/community-scripts/ProxmoxVE/discussions/272 \n"
} # end create_vm

VM_CREATION_PHASE="yes"
create_vm
VM_CREATION_PHASE="no"

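# Quick post-creation sanity checks (illustrative, not part of the script): "qm config $VMID" to review the attached disks and "qm status $VMID" to confirm the VM state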
@@ -64,60 +64,13 @@ trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

# Smart recovery state
VM_CREATION_PHASE="no"
VM_RECOVERY_ATTEMPT=0
VM_MAX_RETRIES=2

function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"

# During VM creation phase: offer recovery menu instead of immediate cleanup
if [[ "$VM_CREATION_PHASE" == "yes" && $VM_RECOVERY_ATTEMPT -lt $VM_MAX_RETRIES ]]; then
((VM_RECOVERY_ATTEMPT++))
trap - ERR
set +e

local choice
choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "VM CREATION FAILED" \
--radiolist "Exit code: ${exit_code} | Attempt: ${VM_RECOVERY_ATTEMPT}/${VM_MAX_RETRIES}\nFailed command: ${command}\n\nChoose a recovery action:" 16 72 3 \
"RETRY" "Retry VM creation" "ON" \
"KEEP" "Keep partial VM for debugging" "OFF" \
"ABORT" "Destroy VM and exit" "OFF" \
3>&1 1>&2 2>&3) || choice="ABORT"

case "$choice" in
RETRY)
msg_info "Cleaning up failed VM ${VMID} for retry"
cleanup_vmid 2>/dev/null || true
msg_ok "Ready for retry (attempt $((VM_RECOVERY_ATTEMPT + 1))/${VM_MAX_RETRIES})"
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
create_vm
exit $?
;;
KEEP)
echo -e "\n${YW} Keeping partial VM ${VMID} for debugging${CL}"
echo -e " Inspect: qm config ${VMID}"
echo -e " Remove: qm destroy ${VMID} --destroy-unreferenced-disks --purge\n"
post_update_to_api "failed" "$exit_code"
exit "$exit_code"
;;
*)
post_update_to_api "failed" "$exit_code"
cleanup_vmid
exit "$exit_code"
;;
esac
fi

# Default: no recovery (max retries exceeded or outside creation phase)
post_update_to_api "failed" "$exit_code"
cleanup_vmid
}

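# Recovery flow in short: RETRY destroys the partial VM and calls create_vm again (bounded by VM_MAX_RETRIES), KEEP leaves it in place for inspection, anything else cleans up and exits with the original error code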
@@ -514,57 +467,55 @@ else
fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."

create_vm() {
msg_info "Retrieving the URL for the Ubuntu 25.04 Disk Image"
URL=https://cloud-images.ubuntu.com/plucky/current/plucky-server-cloudimg-amd64.img
sleep 2
msg_ok "${CL}${BL}${URL}${CL}"
curl -f#SL -o "$(basename "$URL")" "$URL"
echo -en "\e[1A\e[0K"
FILE=$(basename $URL)
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
nfs | dir | cifs)
DISK_EXT=".qcow2"
DISK_REF="$VMID/"
DISK_IMPORT="-format qcow2"
THIN=""
;;
btrfs)
DISK_EXT=".raw"
DISK_REF="$VMID/"
DISK_IMPORT="-format raw"
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"
eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
done

msg_info "Creating a Ubuntu 25.04 VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
qm set $VMID \
-efidisk0 ${DISK0_REF}${FORMAT} \
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
-ide2 ${STORAGE}:cloudinit \
-boot order=scsi0 \
-serial0 socket >/dev/null
DESCRIPTION=$(
cat <<EOF
<div align='center'>
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>

@@ -592,28 +543,23 @@ create_vm() {
</span>
</div>
EOF
)
qm set $VMID -description "$DESCRIPTION" >/dev/null
if [ -n "$DISK_SIZE" ]; then
msg_info "Resizing disk to $DISK_SIZE GB"
qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null
else
msg_info "Using default disk size of $DEFAULT_DISK_SIZE GB"
qm resize $VMID scsi0 ${DEFAULT_DISK_SIZE} >/dev/null
fi

msg_ok "Created a Ubuntu 25.04 VM ${CL}${BL}(${HN})"
if [ "$START_VM" == "yes" ]; then
msg_info "Starting Ubuntu 25.04 VM"
qm start $VMID
msg_ok "Started Ubuntu 25.04 VM"
fi
post_update_to_api "done" "none"
msg_ok "Completed successfully!\n"
echo -e "Setup Cloud-Init before starting \n
More info at https://github.com/community-scripts/ProxmoxVE/discussions/272 \n"
} # end create_vm

VM_CREATION_PHASE="yes"
create_vm
VM_CREATION_PHASE="no"