Compare commits

..

1 Commits

Author SHA1 Message Date
CanbiZ (MickLesk)
4b6c6e8765 fix(elementsynapse): prevent systemd invoke failure during apt install in LXC 2026-02-10 13:31:01 +01:00
67 changed files with 1120 additions and 2014 deletions

View File

@@ -89,15 +89,9 @@ jobs:
slug=$(jq -r '.slug // empty' "$json_file" 2>/dev/null) slug=$(jq -r '.slug // empty' "$json_file" 2>/dev/null)
[[ -z "$slug" ]] && continue [[ -z "$slug" ]] && continue
# Find corresponding script (install script or addon script) # Find corresponding install script
install_script="" install_script="install/${slug}-install.sh"
if [[ -f "install/${slug}-install.sh" ]]; then [[ ! -f "$install_script" ]] && continue
install_script="install/${slug}-install.sh"
elif [[ -f "tools/addon/${slug}.sh" ]]; then
install_script="tools/addon/${slug}.sh"
else
continue
fi
# Look for fetch_and_deploy_gh_release calls # Look for fetch_and_deploy_gh_release calls
# Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"] # Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"]

View File

@@ -401,137 +401,11 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details> </details>
## 2026-02-13
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874))
- Planka: add migrate step to update function [@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877))
- Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868))
- [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864))
- #### 🔧 Refactor
- chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872))
- Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850))
### 💾 Core
- #### 🔧 Refactor
- core: retry reporting with fallback payloads [@MickLesk](https://github.com/MickLesk) ([#11885](https://github.com/community-scripts/ProxmoxVE/pull/11885))
### 📡 API
- #### ✨ New Features
- error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875))
### 🌐 Website
- #### 📝 Script Information
- SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829))
## 2026-02-12
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844))
- Increased the Grafana container default disk size. [@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840))
- Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
- Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833))
- Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831))
- #### ✨ New Features
- Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842))
- Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810))
### 💾 Core
- #### ✨ New Features
- tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841))
- core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822))
- #### 🔧 Refactor
- error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845))
### 🧰 Tools
- #### 🐞 Bug Fixes
- Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837))
## 2026-02-11
### 🆕 New Scripts
- Draw.io ([#11788](https://github.com/community-scripts/ProxmoxVE/pull/11788))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- dispatcharr: include port 9191 in success-message [@MickLesk](https://github.com/MickLesk) ([#11808](https://github.com/community-scripts/ProxmoxVE/pull/11808))
- fix: make donetick 0.1.71 compatible [@tomfrenzel](https://github.com/tomfrenzel) ([#11804](https://github.com/community-scripts/ProxmoxVE/pull/11804))
- Kasm: Support new version URL format without hash suffix [@MickLesk](https://github.com/MickLesk) ([#11787](https://github.com/community-scripts/ProxmoxVE/pull/11787))
- LibreTranslate: Remove Torch [@tremor021](https://github.com/tremor021) ([#11783](https://github.com/community-scripts/ProxmoxVE/pull/11783))
- Snowshare: fix update script [@TuroYT](https://github.com/TuroYT) ([#11726](https://github.com/community-scripts/ProxmoxVE/pull/11726))
- #### ✨ New Features
- [Feature] OpenCloud: support PosixFS Collaborative Mode [@vhsdream](https://github.com/vhsdream) ([#11806](https://github.com/community-scripts/ProxmoxVE/pull/11806))
### 💾 Core
- #### 🔧 Refactor
- core: respect EDITOR variable for config editing [@ls-root](https://github.com/ls-root) ([#11693](https://github.com/community-scripts/ProxmoxVE/pull/11693))
### 📚 Documentation
- Fix formatting in kutt.json notes section [@tiagodenoronha](https://github.com/tiagodenoronha) ([#11774](https://github.com/community-scripts/ProxmoxVE/pull/11774))
## 2026-02-10 ## 2026-02-10
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Immich: Pin version to 2.5.6 [@vhsdream](https://github.com/vhsdream) ([#11775](https://github.com/community-scripts/ProxmoxVE/pull/11775))
- Libretranslate: Fix setuptools [@tremor021](https://github.com/tremor021) ([#11772](https://github.com/community-scripts/ProxmoxVE/pull/11772))
- Element Synapse: prevent systemd invoke failure during apt install [@MickLesk](https://github.com/MickLesk) ([#11758](https://github.com/community-scripts/ProxmoxVE/pull/11758))
- #### ✨ New Features
- Refactor: Slskd & Soularr [@vhsdream](https://github.com/vhsdream) ([#11674](https://github.com/community-scripts/ProxmoxVE/pull/11674))
### 🗑️ Deleted Scripts ### 🗑️ Deleted Scripts
- move paperless-exporter from LXC to addon ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737)) - paperless-exporter ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737))
### 🧰 Tools
- #### 🐞 Bug Fixes
- feat: improve storage parsing & add guestname [@carlosmaroot](https://github.com/carlosmaroot) ([#11752](https://github.com/community-scripts/ProxmoxVE/pull/11752))
### 📂 Github
- Github-Version Workflow: include addon scripts in extraction [@MickLesk](https://github.com/MickLesk) ([#11757](https://github.com/community-scripts/ProxmoxVE/pull/11757))
### 🌐 Website
- #### 📝 Script Information
- Snowshare: fix typo in config file path on website [@BirdMakingStuff](https://github.com/BirdMakingStuff) ([#11754](https://github.com/community-scripts/ProxmoxVE/pull/11754))
## 2026-02-09 ## 2026-02-09

5
api/.env.example Normal file
View File

@@ -0,0 +1,5 @@
MONGO_USER=
MONGO_PASSWORD=
MONGO_IP=
MONGO_PORT=
MONGO_DATABASE=

23
api/go.mod Normal file
View File

@@ -0,0 +1,23 @@
module proxmox-api
go 1.24.0
require (
github.com/gorilla/mux v1.8.1
github.com/joho/godotenv v1.5.1
github.com/rs/cors v1.11.1
go.mongodb.org/mongo-driver v1.17.2
)
require (
github.com/golang/snappy v0.0.4 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/text v0.31.0 // indirect
)

56
api/go.sum Normal file
View File

@@ -0,0 +1,56 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

450
api/main.go Normal file
View File

@@ -0,0 +1,450 @@
// Copyright (c) 2021-2026 community-scripts ORG
// Author: Michel Roegl-Brunner (michelroegl-brunner)
// License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
"github.com/rs/cors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var client *mongo.Client
var collection *mongo.Collection
// loadEnv loads configuration from an optional .env file in the working
// directory. A missing file is not fatal: in containerized deployments the
// MONGO_* variables are typically injected directly into the process
// environment, and ConnectDatabase reads them via os.Getenv either way.
// (The previous log.Fatal made the API refuse to start without a .env file
// even when the environment was fully configured.)
func loadEnv() {
	if err := godotenv.Load(); err != nil {
		log.Println("No .env file found, relying on process environment")
	}
}
// DataModel represents a single document in MongoDB
// (collection "data_models"). One record is created per /upload POST and
// later updated via /upload/updatestatus. The upper-case field names mirror
// the JSON keys in the incoming payload (see the json struct tags); the
// ObjectID is generated by MongoDB (omitempty on insert).
type DataModel struct {
ID primitive.ObjectID `json:"id" bson:"_id,omitempty"`
CT_TYPE uint `json:"ct_type" bson:"ct_type"`
DISK_SIZE float32 `json:"disk_size" bson:"disk_size"`
CORE_COUNT uint `json:"core_count" bson:"core_count"`
RAM_SIZE uint `json:"ram_size" bson:"ram_size"`
OS_TYPE string `json:"os_type" bson:"os_type"`
OS_VERSION string `json:"os_version" bson:"os_version"`
DISABLEIP6 string `json:"disableip6" bson:"disableip6"`
NSAPP string `json:"nsapp" bson:"nsapp"`
METHOD string `json:"method" bson:"method"`
// CreatedAt is set server-side in UploadJSON, not taken from the client.
CreatedAt time.Time `json:"created_at" bson:"created_at"`
PVEVERSION string `json:"pve_version" bson:"pve_version"`
STATUS string `json:"status" bson:"status"`
// RANDOM_ID correlates a record with later status updates (see UpdateStatus).
RANDOM_ID string `json:"random_id" bson:"random_id"`
TYPE string `json:"type" bson:"type"`
ERROR string `json:"error" bson:"error"`
}
// StatusModel is the request body for /upload/updatestatus: it identifies an
// existing record by RANDOM_ID and carries the new status/error values that
// UpdateStatus writes into that record.
type StatusModel struct {
RANDOM_ID string `json:"random_id" bson:"random_id"`
ERROR string `json:"error" bson:"error"`
STATUS string `json:"status" bson:"status"`
}
// CountResponse is the payload returned by /data/summary: the total number of
// documents plus per-status and per-nsapp counts produced by the aggregation
// pipelines in GetSummary.
type CountResponse struct {
TotalEntries int64 `json:"total_entries"`
StatusCount map[string]int64 `json:"status_count"`
NSAPPCount map[string]int64 `json:"nsapp_count"`
}
// ConnectDatabase initializes the MongoDB connection from the MONGO_*
// environment variables and binds the package-level client/collection
// globals used by every handler. It exits the process on failure.
func ConnectDatabase() {
	loadEnv()
	mongoURI := fmt.Sprintf("mongodb://%s:%s@%s:%s",
		os.Getenv("MONGO_USER"),
		os.Getenv("MONGO_PASSWORD"),
		os.Getenv("MONGO_IP"),
		os.Getenv("MONGO_PORT"))
	database := os.Getenv("MONGO_DATABASE")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	var err error
	client, err = mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
	if err != nil {
		log.Fatal("Failed to connect to MongoDB!", err)
	}
	// mongo.Connect does not dial eagerly; ping so that a wrong host or bad
	// credentials fail at startup instead of on the first request.
	if err = client.Ping(ctx, nil); err != nil {
		log.Fatal("Failed to ping MongoDB!", err)
	}
	collection = client.Database(database).Collection("data_models")
	// Report the actual configured host (the old message hardcoded an IP).
	fmt.Printf("Connected to MongoDB on %s\n", os.Getenv("MONGO_IP"))
}
// UploadJSON handles POST /upload: decodes a DataModel from the request body,
// stamps CreatedAt server-side, and inserts it as a new document.
// Responds 201 with a JSON confirmation, 400 on malformed JSON, 500 on
// database errors.
func UploadJSON(w http.ResponseWriter, r *http.Request) {
	var input DataModel
	if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	input.CreatedAt = time.Now()
	// Use the request context so a disconnected client cancels the insert.
	if _, err := collection.InsertOne(r.Context(), input); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	log.Println("Received data:", input)
	// Content-Type must be set before WriteHeader for the JSON body below.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(map[string]string{"message": "Data saved successfully"})
}
// UpdateStatus handles POST /upload/updatestatus: updates the status and
// error fields of the record matching the given RANDOM_ID.
// Responds 200 with a JSON confirmation, 400 on malformed JSON, 500 on
// database errors.
func UpdateStatus(w http.ResponseWriter, r *http.Request) {
	var input StatusModel
	if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	filter := bson.M{"random_id": input.RANDOM_ID}
	update := bson.M{"$set": bson.M{"status": input.STATUS, "error": input.ERROR}}
	// NOTE(review): an unknown random_id still yields 200 (MatchedCount 0).
	// Installer scripts appear to treat this endpoint as best-effort, so the
	// lenient contract is preserved — confirm before tightening to 404.
	if _, err := collection.UpdateOne(r.Context(), filter, update); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	log.Println("Updated data:", input)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(map[string]string{"message": "Record updated successfully"})
}
// GetDataJSON handles GET /data/json: returns every document in the
// collection as a JSON array. Responds 500 on database errors.
func GetDataJSON(w http.ResponseWriter, r *http.Request) {
	// Start non-nil so an empty collection encodes as [] rather than null.
	records := []DataModel{}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		records = append(records, record)
	}
	// Surface iteration errors (network/timeout) that Next swallows.
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(records)
}
// GetPaginatedData handles GET /data/paginated?page=N&limit=M: returns one
// page of documents. Invalid or missing page/limit values fall back to
// page 1 / limit 10 (strconv.Atoi errors are deliberately ignored — they
// leave the value at 0, which the guards below replace).
func GetPaginatedData(w http.ResponseWriter, r *http.Request) {
	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
	if page < 1 {
		page = 1
	}
	if limit < 1 {
		limit = 10
	}
	skip := (page - 1) * limit
	// Start non-nil so an empty page encodes as [] rather than null.
	records := []DataModel{}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Named findOpts: the original "options" shadowed the imported options
	// package inside this function.
	findOpts := options.Find().SetSkip(int64(skip)).SetLimit(int64(limit))
	cursor, err := collection.Find(ctx, bson.M{}, findOpts)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		records = append(records, record)
	}
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(records)
}
// GetSummary handles GET /data/summary: returns the total document count
// plus per-status and per-nsapp counts. The two $group aggregations are
// best-effort (an aggregation failure yields an empty map, matching the
// original behavior), but the cursors are now closed — the original leaked
// both of them.
func GetSummary(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	totalCount, err := collection.CountDocuments(ctx, bson.M{})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// countByField groups documents by the given field and fills dest with
	// value -> count. Errors are swallowed to keep the summary best-effort.
	countByField := func(field string, dest map[string]int64) {
		pipeline := []bson.M{
			{"$group": bson.M{"_id": "$" + field, "count": bson.M{"$sum": 1}}},
		}
		cursor, err := collection.Aggregate(ctx, pipeline)
		if err != nil {
			return
		}
		defer cursor.Close(ctx)
		for cursor.Next(ctx) {
			var result struct {
				ID    string `bson:"_id"`
				Count int64  `bson:"count"`
			}
			if err := cursor.Decode(&result); err == nil {
				dest[result.ID] = result.Count
			}
		}
	}
	statusCount := make(map[string]int64)
	nsappCount := make(map[string]int64)
	countByField("status", statusCount)
	countByField("nsapp", nsappCount)
	response := CountResponse{
		TotalEntries: totalCount,
		StatusCount:  statusCount,
		NSAPPCount:   nsappCount,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// GetByNsapp handles GET /data/nsapp?nsapp=NAME: returns every document
// whose nsapp field equals the query value (an empty value matches records
// with an empty nsapp). Responds 500 on database errors.
func GetByNsapp(w http.ResponseWriter, r *http.Request) {
	nsapp := r.URL.Query().Get("nsapp")
	// Start non-nil so no matches encode as [] rather than null.
	records := []DataModel{}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{"nsapp": nsapp})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		records = append(records, record)
	}
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(records)
}
// GetByDateRange handles GET /data/date?start_date=YYYY-MM-DD&end_date=YYYY-MM-DD:
// returns documents created between start_date 00:00:00 and end_date 23:59:59
// UTC (inclusive). Responds 400 on missing/invalid dates, 500 on database errors.
func GetByDateRange(w http.ResponseWriter, r *http.Request) {
	startDate := r.URL.Query().Get("start_date")
	endDate := r.URL.Query().Get("end_date")
	if startDate == "" || endDate == "" {
		http.Error(w, "Both start_date and end_date are required", http.StatusBadRequest)
		return
	}
	// Parse the plain dates with a valid layout. The original appended fixed
	// times and used a layout containing "+00:00", which is not a recognized
	// offset element — it was matched as literal text and only worked by
	// accident; time.Parse without zone info already yields UTC.
	start, err := time.Parse("2006-01-02", startDate)
	if err != nil {
		http.Error(w, "Invalid start_date format", http.StatusBadRequest)
		return
	}
	endDay, err := time.Parse("2006-01-02", endDate)
	if err != nil {
		http.Error(w, "Invalid end_date format", http.StatusBadRequest)
		return
	}
	// End of the inclusive range: 23:59:59 on end_date, as before.
	end := endDay.Add(24*time.Hour - time.Second)
	// Start non-nil so no matches encode as [] rather than null.
	records := []DataModel{}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{
		"created_at": bson.M{
			"$gte": start,
			"$lte": end,
		},
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		records = append(records, record)
	}
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(records)
}
// GetByStatus handles GET /data/status?status=VALUE: returns every document
// whose status field equals the query value. Responds 500 on database errors.
func GetByStatus(w http.ResponseWriter, r *http.Request) {
	status := r.URL.Query().Get("status")
	// Start non-nil so no matches encode as [] rather than null.
	records := []DataModel{}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{"status": status})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		records = append(records, record)
	}
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(records)
}
// GetByOS handles GET /data/os?os_type=T&os_version=V: returns documents
// matching both the OS type and version exactly. Responds 500 on database
// errors.
func GetByOS(w http.ResponseWriter, r *http.Request) {
	osType := r.URL.Query().Get("os_type")
	osVersion := r.URL.Query().Get("os_version")
	// Start non-nil so no matches encode as [] rather than null.
	records := []DataModel{}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{"os_type": osType, "os_version": osVersion})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		records = append(records, record)
	}
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(records)
}
// GetErrors handles GET /data/errors: tallies distinct non-empty error
// strings across all documents and returns {"error_counts": [...]}.
// Responds 500 on database errors.
func GetErrors(w http.ResponseWriter, r *http.Request) {
	errorCount := make(map[string]int)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{"error": bson.M{"$ne": ""}})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var record DataModel
		if err := cursor.Decode(&record); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Defensive re-check; the query already excludes empty errors.
		if record.ERROR != "" {
			errorCount[record.ERROR]++
		}
	}
	// Surface iteration errors (network/timeout) that Next swallows.
	if err := cursor.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	type ErrorCountResponse struct {
		Error string `json:"error"`
		Count int    `json:"count"`
	}
	// Start non-nil so zero errors encode as [] rather than null.
	errorCounts := []ErrorCountResponse{}
	for msg, count := range errorCount {
		errorCounts = append(errorCounts, ErrorCountResponse{
			Error: msg,
			Count: count,
		})
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(struct {
		ErrorCounts []ErrorCountResponse `json:"error_counts"`
	}{
		ErrorCounts: errorCounts,
	})
}
// main connects to MongoDB, registers the write endpoints used by the
// installer scripts (/upload, /upload/updatestatus) and the read endpoints
// (/data/*), wraps the router in a CORS handler, and serves on :8080.
func main() {
ConnectDatabase()
router := mux.NewRouter()
router.HandleFunc("/upload", UploadJSON).Methods("POST")
router.HandleFunc("/upload/updatestatus", UpdateStatus).Methods("POST")
router.HandleFunc("/data/json", GetDataJSON).Methods("GET")
router.HandleFunc("/data/paginated", GetPaginatedData).Methods("GET")
router.HandleFunc("/data/summary", GetSummary).Methods("GET")
router.HandleFunc("/data/nsapp", GetByNsapp).Methods("GET")
router.HandleFunc("/data/date", GetByDateRange).Methods("GET")
router.HandleFunc("/data/status", GetByStatus).Methods("GET")
router.HandleFunc("/data/os", GetByOS).Methods("GET")
router.HandleFunc("/data/errors", GetErrors).Methods("GET")
// NOTE(review): AllowedOrigins "*" combined with AllowCredentials true is
// an invalid pairing under the CORS spec (browsers reject credentialed
// wildcard responses) — confirm whether credentials are actually needed.
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST"},
AllowedHeaders: []string{"Content-Type", "Authorization"},
AllowCredentials: true,
})
handler := c.Handler(router)
fmt.Println("Server running on port 8080")
// ListenAndServe blocks; log.Fatal fires only if the listener fails.
log.Fatal(http.ListenAndServe(":8080", handler))
}

View File

@@ -9,7 +9,7 @@ APP="Alpine-Grafana"
var_tags="${var_tags:-alpine;monitoring}" var_tags="${var_tags:-alpine;monitoring}"
var_cpu="${var_cpu:-1}" var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}" var_ram="${var_ram:-256}"
var_disk="${var_disk:-2}" var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}" var_os="${var_os:-alpine}"
var_version="${var_version:-3.23}" var_version="${var_version:-3.23}"
var_unprivileged="${var_unprivileged:-1}" var_unprivileged="${var_unprivileged:-1}"

View File

@@ -28,7 +28,6 @@ function update_script() {
exit exit
fi fi
msg_info "Updating Deluge" msg_info "Updating Deluge"
ensure_dependencies python3-setuptools
$STD apt update $STD apt update
$STD pip3 install deluge[all] --upgrade $STD pip3 install deluge[all] --upgrade
msg_ok "Updated Deluge" msg_ok "Updated Deluge"

View File

@@ -104,7 +104,7 @@ function update_script() {
cd /opt/dispatcharr cd /opt/dispatcharr
rm -rf .venv rm -rf .venv
$STD uv venv --clear $STD uv venv --clear
$STD uv sync $STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
$STD uv pip install gunicorn gevent celery redis daphne $STD uv pip install gunicorn gevent celery redis daphne
msg_ok "Updated Dispatcharr Backend" msg_ok "Updated Dispatcharr Backend"
@@ -144,4 +144,4 @@ description
msg_ok "Completed successfully!\n" msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9191${CL}" echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

View File

@@ -35,15 +35,13 @@ function update_script() {
msg_ok "Stopped Service" msg_ok "Stopped Service"
msg_info "Backing Up Configurations" msg_info "Backing Up Configurations"
mv /opt/donetick/config/selfhosted.yaml /opt/donetick/donetick.db /opt mv /opt/donetick/config/selfhosted.yml /opt/donetick/donetick.db /opt
msg_ok "Backed Up Configurations" msg_ok "Backed Up Configurations"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz"
msg_info "Restoring Configurations" msg_info "Restoring Configurations"
mv /opt/selfhosted.yaml /opt/donetick/config mv /opt/selfhosted.yml /opt/donetick/config
grep -q 'http://localhost"$' /opt/donetick/config/selfhosted.yaml || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' /opt/donetick/config/selfhosted.yaml
grep -q 'capacitor://localhost' /opt/donetick/config/selfhosted.yaml || sed -i '/http:\/\/localhost"$/a\ - "capacitor://localhost"' /opt/donetick/config/selfhosted.yaml
mv /opt/donetick.db /opt/donetick mv /opt/donetick.db /opt/donetick
msg_ok "Restored Configurations" msg_ok "Restored Configurations"

View File

@@ -1,58 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.drawio.com/

# Container defaults for the framework sourced above (build.func).
# Each var_* can be overridden from the environment before launch.
APP="DrawIO"
var_tags="${var_tags:-diagrams}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

# Framework hooks: banner, variable setup, colors, error trapping.
header_info "$APP"
variables
color
catch_errors

# update_script: called by the framework when re-run inside an existing
# container. Upgrades OS packages and redeploys draw.war from the latest
# jgraph/drawio GitHub release if a newer version is available.
function update_script() {
header_info
check_container_storage
check_container_resources
# The deployed .war is the marker that an installation exists here.
if [[ ! -f /var/lib/tomcat11/webapps/draw.war ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
# check_for_gh_release returns success only when a newer release exists.
if check_for_gh_release "drawio" "jgraph/drawio"; then
msg_info "Stopping service"
systemctl stop tomcat11
msg_ok "Service stopped"
msg_info "Updating Debian LXC"
$STD apt update
$STD apt upgrade -y
msg_ok "Updated Debian LXC"
# Deploy the release asset as-is; Tomcat expands draw.war on start.
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
msg_info "Starting service"
systemctl start tomcat11
msg_ok "Service started"
msg_ok "Updated successfully!"
fi
exit
}

# Initial creation path: provision the container and print access info.
start
build_container
description

msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080/draw${CL}"

View File

@@ -9,7 +9,7 @@ APP="EMQX"
var_tags="${var_tags:-mqtt}" var_tags="${var_tags:-mqtt}"
var_cpu="${var_cpu:-2}" var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-1024}" var_ram="${var_ram:-1024}"
var_disk="${var_disk:-6}" var_disk="${var_disk:-4}"
var_os="${var_os:-debian}" var_os="${var_os:-debian}"
var_version="${var_version:-13}" var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}" var_unprivileged="${var_unprivileged:-1}"

View File

@@ -9,7 +9,7 @@ APP="Grafana"
var_tags="${var_tags:-monitoring;visualization}" var_tags="${var_tags:-monitoring;visualization}"
var_cpu="${var_cpu:-1}" var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}" var_ram="${var_ram:-512}"
var_disk="${var_disk:-4}" var_disk="${var_disk:-2}"
var_os="${var_os:-debian}" var_os="${var_os:-debian}"
var_version="${var_version:-13}" var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}" var_unprivileged="${var_unprivileged:-1}"

View File

@@ -1,6 +0,0 @@
____ ________
/ __ \_________ __ __/ _/ __ \
/ / / / ___/ __ `/ | /| / // // / / /
/ /_/ / / / /_/ /| |/ |/ // // /_/ /
/_____/_/ \__,_/ |__/|__/___/\____/

View File

@@ -105,7 +105,7 @@ EOF
msg_ok "Image-processing libraries up to date" msg_ok "Image-processing libraries up to date"
fi fi
RELEASE="2.5.6" RELEASE="2.5.5"
if check_for_gh_release "Immich" "immich-app/immich" "${RELEASE}"; then if check_for_gh_release "Immich" "immich-app/immich" "${RELEASE}"; then
if [[ $(cat ~/.immich) > "2.5.1" ]]; then if [[ $(cat ~/.immich) > "2.5.1" ]]; then
msg_info "Enabling Maintenance Mode" msg_info "Enabling Maintenance Mode"

View File

@@ -46,7 +46,7 @@ function update_script() {
msg_info "Restoring configuration & data" msg_info "Restoring configuration & data"
mv /opt/app.env /opt/jotty/.env mv /opt/app.env /opt/jotty/.env
[[ -d /opt/data ]] && mv /opt/data /opt/jotty/data [[ -d /opt/data ]] && mv /opt/data /opt/jotty/data
[[ -d /opt/jotty/config ]] && cp -a /opt/config/* /opt/jotty/config && rm -rf /opt/config [[ -d /opt/jotty/config ]] && mv /opt/config/* /opt/jotty/config
msg_ok "Restored configuration & data" msg_ok "Restored configuration & data"
msg_info "Starting Service" msg_info "Starting Service"

View File

@@ -34,19 +34,10 @@ function update_script() {
CURRENT_VERSION=$(readlink -f /opt/kasm/current | awk -F'/' '{print $4}') CURRENT_VERSION=$(readlink -f /opt/kasm/current | awk -F'/' '{print $4}')
KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1) KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1)
if [[ -z "$KASM_URL" ]]; then if [[ -z "$KASM_URL" ]]; then
SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1)
if [[ -n "$SERVICE_IMAGE_URL" ]]; then
KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz"
fi
else
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
fi
if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
msg_error "Unable to detect latest Kasm release URL." msg_error "Unable to detect latest Kasm release URL."
exit 1 exit 1
fi fi
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
msg_info "Checked for new version" msg_info "Checked for new version"
msg_info "Removing outdated docker-compose plugin" msg_info "Removing outdated docker-compose plugin"

View File

@@ -30,7 +30,7 @@ function update_script() {
fi fi
RELEASE="v5.0.2" RELEASE="v5.0.2"
if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}"; then if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then
msg_info "Stopping services" msg_info "Stopping services"
systemctl stop opencloud opencloud-wopi systemctl stop opencloud opencloud-wopi
msg_ok "Stopped services" msg_ok "Stopped services"
@@ -38,21 +38,9 @@ function update_script() {
msg_info "Updating packages" msg_info "Updating packages"
$STD apt-get update $STD apt-get update
$STD apt-get dist-upgrade -y $STD apt-get dist-upgrade -y
ensure_dependencies "inotify-tools"
msg_ok "Updated packages" msg_ok "Updated packages"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "OpenCloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"
if ! grep -q 'POSIX_WATCH' /etc/opencloud/opencloud.env; then
sed -i '/^## External/i ## Uncomment below to enable PosixFS Collaborative Mode\
## Increase inotify watch/instance limits on your PVE host:\
### sysctl -w fs.inotify.max_user_watches=1048576\
### sysctl -w fs.inotify.max_user_instances=1024\
# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true\
# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait\
# STORAGE_USERS_POSIX_WATCH_FS=true\
# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>' /etc/opencloud/opencloud.env
fi
msg_info "Starting services" msg_info "Starting services"
systemctl start opencloud opencloud-wopi systemctl start opencloud opencloud-wopi

View File

@@ -44,7 +44,7 @@ function update_script() {
msg_info "Installing uv-based Open-WebUI" msg_info "Installing uv-based Open-WebUI"
PYTHON_VERSION="3.12" setup_uv PYTHON_VERSION="3.12" setup_uv
$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all] $STD uv tool install --python 3.12 open-webui[all]
msg_ok "Installed uv-based Open-WebUI" msg_ok "Installed uv-based Open-WebUI"
msg_info "Restoring data" msg_info "Restoring data"
@@ -126,7 +126,7 @@ EOF
msg_info "Updating Open WebUI via uv" msg_info "Updating Open WebUI via uv"
PYTHON_VERSION="3.12" setup_uv PYTHON_VERSION="3.12" setup_uv
$STD uv tool install --force --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all] $STD uv tool upgrade --python 3.12 open-webui[all]
systemctl restart open-webui systemctl restart open-webui
msg_ok "Updated Open WebUI" msg_ok "Updated Open WebUI"
msg_ok "Updated successfully!" msg_ok "Updated successfully!"

View File

@@ -51,7 +51,7 @@ function update_script() {
$STD npm run db:generate $STD npm run db:generate
$STD npm run build $STD npm run build
$STD npm run build:cli $STD npm run build:cli
$STD npm run db:push $STD npm run db:sqlite:push
cp -R .next/standalone ./ cp -R .next/standalone ./
chmod +x ./dist/cli.mjs chmod +x ./dist/cli.mjs
cp server/db/names.json ./dist/names.json cp server/db/names.json ./dist/names.json

View File

@@ -61,12 +61,6 @@ function update_script() {
rm -rf "$BK" rm -rf "$BK"
msg_ok "Restored data" msg_ok "Restored data"
msg_ok "Migrate Database"
cd /opt/planka
$STD npm run db:upgrade
$STD npm run db:migrate
msg_ok "Migrated Database"
msg_info "Starting Service" msg_info "Starting Service"
systemctl start planka systemctl start planka
msg_ok "Started Service" msg_ok "Started Service"

View File

@@ -28,55 +28,16 @@ function update_script() {
exit exit
fi fi
if check_for_gh_release "Radicale" "Kozea/Radicale"; then msg_info "Updating ${APP}"
msg_info "Stopping service" $STD python3 -m venv /opt/radicale
systemctl stop radicale source /opt/radicale/bin/activate
msg_ok "Stopped service" $STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
msg_ok "Updated ${APP}"
msg_info "Backing up users file" msg_info "Starting Service"
cp /opt/radicale/users /opt/radicale_users_backup systemctl enable -q --now radicale
msg_ok "Backed up users file" msg_ok "Started Service"
msg_ok "Updated successfully!"
PYTHON_VERSION="3.13" setup_uv
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
msg_info "Restoring users file"
rm -f /opt/radicale/users
mv /opt/radicale_users_backup /opt/radicale/users
msg_ok "Restored users file"
if grep -q 'start.sh' /etc/systemd/system/radicale.service; then
sed -i -e '/^Description/i[Unit]' \
-e '\|^ExecStart|iWorkingDirectory=/opt/radicale' \
-e 's|^ExecStart=.*|ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config|' /etc/systemd/system/radicale.service
systemctl daemon-reload
fi
if [[ ! -f /etc/radicale/config ]]; then
msg_info "Migrating to config file (/etc/radicale/config)"
mkdir -p /etc/radicale
cat <<EOF >/etc/radicale/config
[server]
hosts = 0.0.0.0:5232
[auth]
type = htpasswd
htpasswd_filename = /opt/radicale/users
htpasswd_encryption = sha512
[storage]
type = multifilesystem
filesystem_folder = /var/lib/radicale/collections
[web]
type = internal
EOF
msg_ok "Migrated to config (/etc/radicale/config)"
fi
msg_info "Starting service"
systemctl start radicale
msg_ok "Started service"
msg_ok "Updated Successfully!"
fi
exit exit
} }

View File

@@ -3,7 +3,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Copyright (c) 2021-2026 community-scripts ORG # Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream # Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/slskd/slskd, https://github.com/mrusse/soularr # Source: https://github.com/slskd/slskd, https://soularr.net
APP="slskd" APP="slskd"
var_tags="${var_tags:-arr;p2p}" var_tags="${var_tags:-arr;p2p}"
@@ -24,65 +24,50 @@ function update_script() {
check_container_storage check_container_storage
check_container_resources check_container_resources
if [[ ! -d /opt/slskd ]]; then if [[ ! -d /opt/slskd ]] || [[ ! -d /opt/soularr ]]; then
msg_error "No Slskd Installation Found!" msg_error "No ${APP} Installation Found!"
exit exit
fi fi
if check_for_gh_release "Slskd" "slskd/slskd"; then RELEASE=$(curl -s https://api.github.com/repos/slskd/slskd/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
msg_info "Stopping Service(s)" if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
systemctl stop slskd msg_info "Stopping Service"
[[ -f /etc/systemd/system/soularr.service ]] && systemctl stop soularr.timer soularr.service systemctl stop slskd soularr.timer soularr.service
msg_ok "Stopped Service(s)" msg_info "Stopped Service"
msg_info "Backing up config" msg_info "Updating $APP to v${RELEASE}"
cp /opt/slskd/config/slskd.yml /opt/slskd.yml.bak tmp_file=$(mktemp)
msg_ok "Backed up config" curl -fsSL "https://github.com/slskd/slskd/releases/download/${RELEASE}/slskd-${RELEASE}-linux-x64.zip" -o $tmp_file
$STD unzip -oj $tmp_file slskd -d /opt/${APP}
echo "${RELEASE}" >/opt/${APP}_version.txt
msg_ok "Updated $APP to v${RELEASE}"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Slskd" "slskd/slskd" "prebuild" "latest" "/opt/slskd" "slskd-*-linux-x64.zip" msg_info "Starting Service"
msg_info "Restoring config"
mv /opt/slskd.yml.bak /opt/slskd/config/slskd.yml
msg_ok "Restored config"
msg_info "Starting Service(s)"
systemctl start slskd systemctl start slskd
[[ -f /etc/systemd/system/soularr.service ]] && systemctl start soularr.timer msg_ok "Started Service"
msg_ok "Started Service(s)" rm -rf $tmp_file
msg_ok "Updated Slskd successfully!" else
msg_ok "No ${APP} update required. ${APP} is already at v${RELEASE}"
fi fi
[[ -d /opt/soularr ]] && if check_for_gh_release "Soularr" "mrusse/soularr"; then msg_info "Updating Soularr"
if systemctl is-active soularr.timer >/dev/null; then cp /opt/soularr/config.ini /opt/config.ini.bak
msg_info "Stopping Timer and Service" cp /opt/soularr/run.sh /opt/run.sh.bak
systemctl stop soularr.timer soularr.service cd /tmp
msg_ok "Stopped Timer and Service" rm -rf /opt/soularr
fi curl -fsSL -o main.zip https://github.com/mrusse/soularr/archive/refs/heads/main.zip
$STD unzip main.zip
mv soularr-main /opt/soularr
cd /opt/soularr
$STD pip install -r requirements.txt
mv /opt/config.ini.bak /opt/soularr/config.ini
mv /opt/run.sh.bak /opt/soularr/run.sh
rm -rf /tmp/main.zip
msg_ok "Updated soularr"
msg_info "Backing up Soularr config" msg_info "Starting soularr timer"
cp /opt/soularr/config.ini /opt/soularr_config.ini.bak systemctl start soularr.timer
cp /opt/soularr/run.sh /opt/soularr_run.sh.bak msg_ok "Started soularr timer"
msg_ok "Backed up Soularr config" exit
PYTHON_VERSION="3.11" setup_uv
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Soularr" "mrusse/soularr" "tarball" "latest" "/opt/soularr"
msg_info "Updating Soularr"
cd /opt/soularr
$STD uv venv -c venv
$STD source venv/bin/activate
$STD uv pip install -r requirements.txt
deactivate
msg_ok "Updated Soularr"
msg_info "Restoring Soularr config"
mv /opt/soularr_config.ini.bak /opt/soularr/config.ini
mv /opt/soularr_run.sh.bak /opt/soularr/run.sh
msg_ok "Restored Soularr config"
msg_info "Starting Soularr Timer"
systemctl restart soularr.timer
msg_ok "Started Soularr Timer"
msg_ok "Updated Soularr successfully!"
fi
} }
start start

View File

@@ -33,15 +33,7 @@ function update_script() {
systemctl stop snowshare systemctl stop snowshare
msg_ok "Stopped Service" msg_ok "Stopped Service"
msg_info "Backing up uploads" fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare" "tarball"
[ -d /opt/snowshare/uploads ] && cp -a /opt/snowshare/uploads /opt/.snowshare_uploads_backup
msg_ok "Uploads backed up"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare" "tarball"
msg_info "Restoring uploads"
[ -d /opt/.snowshare_uploads_backup ] && rm -rf /opt/snowshare/uploads && cp -a /opt/.snowshare_uploads_backup /opt/snowshare/uploads
msg_ok "Uploads restored"
msg_info "Updating Snowshare" msg_info "Updating Snowshare"
cd /opt/snowshare cd /opt/snowshare

View File

@@ -1,35 +0,0 @@
{
"name": "Draw.IO",
"slug": "drawio",
"categories": [
12
],
"date_created": "2026-02-11",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8080,
"documentation": "https://www.drawio.com/doc/",
"website": "https://www.drawio.com/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/draw-io.webp",
"config_path": "",
"description": "draw.io is a configurable diagramming and whiteboarding application, jointly owned and developed by draw.io Ltd (previously named JGraph) and draw.io AG.",
"install_methods": [
{
"type": "default",
"script": "ct/drawio.sh",
"resources": {
"cpu": 1,
"ram": 2048,
"hdd": 4,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@@ -21,7 +21,7 @@
"resources": { "resources": {
"cpu": 2, "cpu": 2,
"ram": 1024, "ram": 1024,
"hdd": 6, "hdd": 4,
"os": "debian", "os": "debian",
"version": "13" "version": "13"
} }

View File

@@ -1,5 +1,5 @@
{ {
"generated": "2026-02-13T12:11:36Z", "generated": "2026-02-10T12:19:02Z",
"versions": [ "versions": [
{ {
"slug": "2fauth", "slug": "2fauth",
@@ -15,13 +15,6 @@
"pinned": false, "pinned": false,
"date": "2025-12-08T14:34:55Z" "date": "2025-12-08T14:34:55Z"
}, },
{
"slug": "adguardhome-sync",
"repo": "bakito/adguardhome-sync",
"version": "v0.8.2",
"pinned": false,
"date": "2025-10-24T17:13:47Z"
},
{ {
"slug": "adventurelog", "slug": "adventurelog",
"repo": "seanmorley15/adventurelog", "repo": "seanmorley15/adventurelog",
@@ -193,9 +186,9 @@
{ {
"slug": "cleanuparr", "slug": "cleanuparr",
"repo": "Cleanuparr/Cleanuparr", "repo": "Cleanuparr/Cleanuparr",
"version": "v2.6.1", "version": "v2.5.1",
"pinned": false, "pinned": false,
"date": "2026-02-13T10:00:19Z" "date": "2026-01-11T00:46:17Z"
}, },
{ {
"slug": "cloudreve", "slug": "cloudreve",
@@ -207,9 +200,9 @@
{ {
"slug": "comfyui", "slug": "comfyui",
"repo": "comfyanonymous/ComfyUI", "repo": "comfyanonymous/ComfyUI",
"version": "v0.13.0", "version": "v0.12.3",
"pinned": false, "pinned": false,
"date": "2026-02-10T20:27:38Z" "date": "2026-02-05T07:04:07Z"
}, },
{ {
"slug": "commafeed", "slug": "commafeed",
@@ -242,16 +235,16 @@
{ {
"slug": "cronicle", "slug": "cronicle",
"repo": "jhuckaby/Cronicle", "repo": "jhuckaby/Cronicle",
"version": "v0.9.106", "version": "v0.9.105",
"pinned": false, "pinned": false,
"date": "2026-02-11T17:11:46Z" "date": "2026-02-05T18:16:11Z"
}, },
{ {
"slug": "cryptpad", "slug": "cryptpad",
"repo": "cryptpad/cryptpad", "repo": "cryptpad/cryptpad",
"version": "2026.2.0", "version": "2025.9.0",
"pinned": false, "pinned": false,
"date": "2026-02-11T15:39:05Z" "date": "2025-10-22T10:06:29Z"
}, },
{ {
"slug": "dawarich", "slug": "dawarich",
@@ -270,9 +263,9 @@
{ {
"slug": "dispatcharr", "slug": "dispatcharr",
"repo": "Dispatcharr/Dispatcharr", "repo": "Dispatcharr/Dispatcharr",
"version": "v0.19.0", "version": "v0.18.1",
"pinned": false, "pinned": false,
"date": "2026-02-10T21:18:10Z" "date": "2026-01-27T17:09:11Z"
}, },
{ {
"slug": "docmost", "slug": "docmost",
@@ -284,30 +277,23 @@
{ {
"slug": "domain-locker", "slug": "domain-locker",
"repo": "Lissy93/domain-locker", "repo": "Lissy93/domain-locker",
"version": "v0.1.3", "version": "v0.1.2",
"pinned": false, "pinned": false,
"date": "2026-02-11T10:03:32Z" "date": "2025-11-14T22:08:23Z"
}, },
{ {
"slug": "domain-monitor", "slug": "domain-monitor",
"repo": "Hosteroid/domain-monitor", "repo": "Hosteroid/domain-monitor",
"version": "v1.1.3", "version": "v1.1.2",
"pinned": false, "pinned": false,
"date": "2026-02-11T15:48:18Z" "date": "2026-02-09T06:29:34Z"
}, },
{ {
"slug": "donetick", "slug": "donetick",
"repo": "donetick/donetick", "repo": "donetick/donetick",
"version": "v0.1.73", "version": "v0.1.64",
"pinned": false, "pinned": false,
"date": "2026-02-12T23:42:30Z" "date": "2025-10-03T05:18:24Z"
},
{
"slug": "drawio",
"repo": "jgraph/drawio",
"version": "v29.3.6",
"pinned": false,
"date": "2026-01-28T18:25:02Z"
}, },
{ {
"slug": "duplicati", "slug": "duplicati",
@@ -333,9 +319,9 @@
{ {
"slug": "endurain", "slug": "endurain",
"repo": "endurain-project/endurain", "repo": "endurain-project/endurain",
"version": "v0.17.4", "version": "v0.17.3",
"pinned": false, "pinned": false,
"date": "2026-02-11T04:54:22Z" "date": "2026-01-23T22:02:05Z"
}, },
{ {
"slug": "ersatztv", "slug": "ersatztv",
@@ -403,9 +389,9 @@
{ {
"slug": "ghostfolio", "slug": "ghostfolio",
"repo": "ghostfolio/ghostfolio", "repo": "ghostfolio/ghostfolio",
"version": "2.238.0", "version": "2.237.0",
"pinned": false, "pinned": false,
"date": "2026-02-12T18:28:55Z" "date": "2026-02-08T13:59:53Z"
}, },
{ {
"slug": "gitea", "slug": "gitea",
@@ -543,16 +529,9 @@
{ {
"slug": "huntarr", "slug": "huntarr",
"repo": "plexguide/Huntarr.io", "repo": "plexguide/Huntarr.io",
"version": "9.2.4.1", "version": "9.2.3",
"pinned": false, "pinned": false,
"date": "2026-02-12T22:17:47Z" "date": "2026-02-07T04:44:20Z"
},
{
"slug": "immich-public-proxy",
"repo": "alangrainger/immich-public-proxy",
"version": "v1.15.1",
"pinned": false,
"date": "2026-01-26T08:04:27Z"
}, },
{ {
"slug": "inspircd", "slug": "inspircd",
@@ -571,23 +550,16 @@
{ {
"slug": "invoiceninja", "slug": "invoiceninja",
"repo": "invoiceninja/invoiceninja", "repo": "invoiceninja/invoiceninja",
"version": "v5.12.59", "version": "v5.12.55",
"pinned": false, "pinned": false,
"date": "2026-02-13T02:26:13Z" "date": "2026-02-05T01:06:15Z"
}, },
{ {
"slug": "jackett", "slug": "jackett",
"repo": "Jackett/Jackett", "repo": "Jackett/Jackett",
"version": "v0.24.1103", "version": "v0.24.1089",
"pinned": false, "pinned": false,
"date": "2026-02-13T05:53:23Z" "date": "2026-02-10T05:55:59Z"
},
{
"slug": "jellystat",
"repo": "CyferShepard/Jellystat",
"version": "V1.1.8",
"pinned": false,
"date": "2026-02-08T08:15:00Z"
}, },
{ {
"slug": "joplin-server", "slug": "joplin-server",
@@ -599,9 +571,9 @@
{ {
"slug": "jotty", "slug": "jotty",
"repo": "fccview/jotty", "repo": "fccview/jotty",
"version": "1.20.0", "version": "1.19.1",
"pinned": false, "pinned": false,
"date": "2026-02-12T09:23:30Z" "date": "2026-01-26T21:30:39Z"
}, },
{ {
"slug": "kapowarr", "slug": "kapowarr",
@@ -711,9 +683,9 @@
{ {
"slug": "libretranslate", "slug": "libretranslate",
"repo": "LibreTranslate/LibreTranslate", "repo": "LibreTranslate/LibreTranslate",
"version": "v1.9.0", "version": "v1.8.4",
"pinned": false, "pinned": false,
"date": "2026-02-10T19:05:48Z" "date": "2026-02-02T17:45:16Z"
}, },
{ {
"slug": "lidarr", "slug": "lidarr",
@@ -746,9 +718,9 @@
{ {
"slug": "lubelogger", "slug": "lubelogger",
"repo": "hargata/lubelog", "repo": "hargata/lubelog",
"version": "v1.6.0", "version": "v1.5.9",
"pinned": false, "pinned": false,
"date": "2026-02-10T20:16:32Z" "date": "2026-02-09T17:36:13Z"
}, },
{ {
"slug": "mafl", "slug": "mafl",
@@ -767,9 +739,9 @@
{ {
"slug": "mail-archiver", "slug": "mail-archiver",
"repo": "s1t5/mail-archiver", "repo": "s1t5/mail-archiver",
"version": "2602.1", "version": "2601.3",
"pinned": false, "pinned": false,
"date": "2026-02-11T06:23:11Z" "date": "2026-01-25T12:52:24Z"
}, },
{ {
"slug": "managemydamnlife", "slug": "managemydamnlife",
@@ -795,9 +767,9 @@
{ {
"slug": "mediamanager", "slug": "mediamanager",
"repo": "maxdorninger/MediaManager", "repo": "maxdorninger/MediaManager",
"version": "v1.12.3", "version": "v1.12.2",
"pinned": false, "pinned": false,
"date": "2026-02-11T16:45:40Z" "date": "2026-02-08T19:18:29Z"
}, },
{ {
"slug": "mediamtx", "slug": "mediamtx",
@@ -823,9 +795,9 @@
{ {
"slug": "metube", "slug": "metube",
"repo": "alexta69/metube", "repo": "alexta69/metube",
"version": "2026.02.12", "version": "2026.02.08",
"pinned": false, "pinned": false,
"date": "2026-02-12T21:05:49Z" "date": "2026-02-08T17:01:37Z"
}, },
{ {
"slug": "miniflux", "slug": "miniflux",
@@ -865,9 +837,9 @@
{ {
"slug": "navidrome", "slug": "navidrome",
"repo": "navidrome/navidrome", "repo": "navidrome/navidrome",
"version": "v0.60.3", "version": "v0.60.2",
"pinned": false, "pinned": false,
"date": "2026-02-10T23:55:04Z" "date": "2026-02-07T19:42:33Z"
}, },
{ {
"slug": "netbox", "slug": "netbox",
@@ -876,13 +848,6 @@
"pinned": false, "pinned": false,
"date": "2026-02-03T13:54:26Z" "date": "2026-02-03T13:54:26Z"
}, },
{
"slug": "nextcloud-exporter",
"repo": "xperimental/nextcloud-exporter",
"version": "v0.9.0",
"pinned": false,
"date": "2025-10-12T20:03:10Z"
},
{ {
"slug": "nginx-ui", "slug": "nginx-ui",
"repo": "0xJacky/nginx-ui", "repo": "0xJacky/nginx-ui",
@@ -998,9 +963,9 @@
{ {
"slug": "pangolin", "slug": "pangolin",
"repo": "fosrl/pangolin", "repo": "fosrl/pangolin",
"version": "1.15.4", "version": "1.15.2",
"pinned": false, "pinned": false,
"date": "2026-02-13T00:54:02Z" "date": "2026-02-05T19:23:58Z"
}, },
{ {
"slug": "paperless-ai", "slug": "paperless-ai",
@@ -1026,9 +991,9 @@
{ {
"slug": "patchmon", "slug": "patchmon",
"repo": "PatchMon/PatchMon", "repo": "PatchMon/PatchMon",
"version": "v1.4.0", "version": "v1.3.7",
"pinned": false, "pinned": false,
"date": "2026-02-13T10:39:03Z" "date": "2025-12-25T11:08:14Z"
}, },
{ {
"slug": "paymenter", "slug": "paymenter",
@@ -1072,19 +1037,12 @@
"pinned": false, "pinned": false,
"date": "2025-12-01T05:07:31Z" "date": "2025-12-01T05:07:31Z"
}, },
{
"slug": "pihole-exporter",
"repo": "eko/pihole-exporter",
"version": "v1.2.0",
"pinned": false,
"date": "2025-07-29T19:15:37Z"
},
{ {
"slug": "planka", "slug": "planka",
"repo": "plankanban/planka", "repo": "plankanban/planka",
"version": "v2.0.0", "version": "v2.0.0-rc.4",
"pinned": false, "pinned": false,
"date": "2026-02-11T13:50:10Z" "date": "2025-09-04T12:41:17Z"
}, },
{ {
"slug": "plant-it", "slug": "plant-it",
@@ -1131,9 +1089,9 @@
{ {
"slug": "prometheus-alertmanager", "slug": "prometheus-alertmanager",
"repo": "prometheus/alertmanager", "repo": "prometheus/alertmanager",
"version": "v0.31.1", "version": "v0.31.0",
"pinned": false, "pinned": false,
"date": "2026-02-11T21:28:26Z" "date": "2026-02-02T13:34:15Z"
}, },
{ {
"slug": "prometheus-blackbox-exporter", "slug": "prometheus-blackbox-exporter",
@@ -1142,13 +1100,6 @@
"pinned": false, "pinned": false,
"date": "2025-12-06T13:32:18Z" "date": "2025-12-06T13:32:18Z"
}, },
{
"slug": "prometheus-paperless-ngx-exporter",
"repo": "hansmi/prometheus-paperless-exporter",
"version": "v0.0.9",
"pinned": false,
"date": "2025-12-08T20:37:45Z"
},
{ {
"slug": "prowlarr", "slug": "prowlarr",
"repo": "Prowlarr/Prowlarr", "repo": "Prowlarr/Prowlarr",
@@ -1173,9 +1124,9 @@
{ {
"slug": "pulse", "slug": "pulse",
"repo": "rcourtman/Pulse", "repo": "rcourtman/Pulse",
"version": "v5.1.9", "version": "v5.1.7",
"pinned": false, "pinned": false,
"date": "2026-02-11T15:34:40Z" "date": "2026-02-10T09:59:55Z"
}, },
{ {
"slug": "pve-scripts-local", "slug": "pve-scripts-local",
@@ -1191,13 +1142,6 @@
"pinned": false, "pinned": false,
"date": "2025-11-19T23:54:34Z" "date": "2025-11-19T23:54:34Z"
}, },
{
"slug": "qbittorrent-exporter",
"repo": "martabal/qbittorrent-exporter",
"version": "v1.13.2",
"pinned": false,
"date": "2025-12-13T22:59:03Z"
},
{ {
"slug": "qdrant", "slug": "qdrant",
"repo": "qdrant/qdrant", "repo": "qdrant/qdrant",
@@ -1219,13 +1163,6 @@
"pinned": false, "pinned": false,
"date": "2025-11-16T22:39:01Z" "date": "2025-11-16T22:39:01Z"
}, },
{
"slug": "radicale",
"repo": "Kozea/Radicale",
"version": "v3.6.0",
"pinned": false,
"date": "2026-01-10T06:56:46Z"
},
{ {
"slug": "rclone", "slug": "rclone",
"repo": "rclone/rclone", "repo": "rclone/rclone",
@@ -1236,9 +1173,9 @@
{ {
"slug": "rdtclient", "slug": "rdtclient",
"repo": "rogerfar/rdt-client", "repo": "rogerfar/rdt-client",
"version": "v2.0.120", "version": "v2.0.119",
"pinned": false, "pinned": false,
"date": "2026-02-12T02:53:51Z" "date": "2025-10-13T23:15:11Z"
}, },
{ {
"slug": "reactive-resume", "slug": "reactive-resume",
@@ -1292,16 +1229,16 @@
{ {
"slug": "scanopy", "slug": "scanopy",
"repo": "scanopy/scanopy", "repo": "scanopy/scanopy",
"version": "v0.14.4", "version": "v0.14.3",
"pinned": false, "pinned": false,
"date": "2026-02-10T03:57:28Z" "date": "2026-02-04T01:41:01Z"
}, },
{ {
"slug": "scraparr", "slug": "scraparr",
"repo": "thecfu/scraparr", "repo": "thecfu/scraparr",
"version": "v3.0.3", "version": "v2.2.5",
"pinned": false, "pinned": false,
"date": "2026-02-12T14:20:56Z" "date": "2025-10-07T12:34:31Z"
}, },
{ {
"slug": "seelf", "slug": "seelf",
@@ -1338,13 +1275,6 @@
"pinned": false, "pinned": false,
"date": "2026-01-16T12:08:28Z" "date": "2026-01-16T12:08:28Z"
}, },
{
"slug": "slskd",
"repo": "slskd/slskd",
"version": "0.24.3",
"pinned": false,
"date": "2026-01-15T14:40:15Z"
},
{ {
"slug": "snipeit", "slug": "snipeit",
"repo": "grokability/snipe-it", "repo": "grokability/snipe-it",
@@ -1355,9 +1285,9 @@
{ {
"slug": "snowshare", "slug": "snowshare",
"repo": "TuroYT/snowshare", "repo": "TuroYT/snowshare",
"version": "v1.3.5", "version": "v1.3.3",
"pinned": false, "pinned": false,
"date": "2026-02-11T10:24:51Z" "date": "2026-02-09T10:52:12Z"
}, },
{ {
"slug": "sonarr", "slug": "sonarr",
@@ -1390,9 +1320,9 @@
{ {
"slug": "stirling-pdf", "slug": "stirling-pdf",
"repo": "Stirling-Tools/Stirling-PDF", "repo": "Stirling-Tools/Stirling-PDF",
"version": "v2.4.6", "version": "v2.4.5",
"pinned": false, "pinned": false,
"date": "2026-02-12T00:01:19Z" "date": "2026-02-06T23:12:20Z"
}, },
{ {
"slug": "streamlink-webui", "slug": "streamlink-webui",
@@ -1439,9 +1369,9 @@
{ {
"slug": "termix", "slug": "termix",
"repo": "Termix-SSH/Termix", "repo": "Termix-SSH/Termix",
"version": "release-1.11.1-tag", "version": "release-1.11.0-tag",
"pinned": false, "pinned": false,
"date": "2026-02-13T04:49:16Z" "date": "2026-01-25T02:09:52Z"
}, },
{ {
"slug": "the-lounge", "slug": "the-lounge",
@@ -1467,9 +1397,9 @@
{ {
"slug": "tianji", "slug": "tianji",
"repo": "msgbyte/tianji", "repo": "msgbyte/tianji",
"version": "v1.31.12", "version": "v1.31.10",
"pinned": false, "pinned": false,
"date": "2026-02-12T19:06:14Z" "date": "2026-02-04T17:21:04Z"
}, },
{ {
"slug": "traccar", "slug": "traccar",
@@ -1481,9 +1411,9 @@
{ {
"slug": "tracearr", "slug": "tracearr",
"repo": "connorgallopo/Tracearr", "repo": "connorgallopo/Tracearr",
"version": "v1.4.17", "version": "v1.4.12",
"pinned": false, "pinned": false,
"date": "2026-02-11T01:33:21Z" "date": "2026-01-28T23:29:37Z"
}, },
{ {
"slug": "tracktor", "slug": "tracktor",
@@ -1495,9 +1425,9 @@
{ {
"slug": "traefik", "slug": "traefik",
"repo": "traefik/traefik", "repo": "traefik/traefik",
"version": "v3.6.8", "version": "v3.6.7",
"pinned": false, "pinned": false,
"date": "2026-02-11T16:44:37Z" "date": "2026-01-14T14:11:45Z"
}, },
{ {
"slug": "trilium", "slug": "trilium",
@@ -1509,9 +1439,9 @@
{ {
"slug": "trip", "slug": "trip",
"repo": "itskovacs/TRIP", "repo": "itskovacs/TRIP",
"version": "1.40.0", "version": "1.39.0",
"pinned": false, "pinned": false,
"date": "2026-02-10T20:12:53Z" "date": "2026-02-07T16:59:51Z"
}, },
{ {
"slug": "tududi", "slug": "tududi",
@@ -1558,9 +1488,9 @@
{ {
"slug": "upsnap", "slug": "upsnap",
"repo": "seriousm4x/UpSnap", "repo": "seriousm4x/UpSnap",
"version": "5.2.8", "version": "5.2.7",
"pinned": false, "pinned": false,
"date": "2026-02-13T00:02:37Z" "date": "2026-01-07T23:48:00Z"
}, },
{ {
"slug": "uptimekuma", "slug": "uptimekuma",
@@ -1572,9 +1502,9 @@
{ {
"slug": "vaultwarden", "slug": "vaultwarden",
"repo": "dani-garcia/vaultwarden", "repo": "dani-garcia/vaultwarden",
"version": "1.35.3", "version": "1.35.2",
"pinned": false, "pinned": false,
"date": "2026-02-10T20:37:03Z" "date": "2026-01-09T18:37:04Z"
}, },
{ {
"slug": "victoriametrics", "slug": "victoriametrics",
@@ -1600,9 +1530,9 @@
{ {
"slug": "wallos", "slug": "wallos",
"repo": "ellite/Wallos", "repo": "ellite/Wallos",
"version": "v4.6.1", "version": "v4.6.0",
"pinned": false, "pinned": false,
"date": "2026-02-10T21:06:46Z" "date": "2025-12-20T15:57:51Z"
}, },
{ {
"slug": "wanderer", "slug": "wanderer",
@@ -1635,9 +1565,9 @@
{ {
"slug": "wavelog", "slug": "wavelog",
"repo": "wavelog/wavelog", "repo": "wavelog/wavelog",
"version": "2.3", "version": "2.2.2",
"pinned": false, "pinned": false,
"date": "2026-02-11T15:46:40Z" "date": "2025-12-31T16:53:34Z"
}, },
{ {
"slug": "wealthfolio", "slug": "wealthfolio",
@@ -1663,9 +1593,9 @@
{ {
"slug": "wikijs", "slug": "wikijs",
"repo": "requarks/wiki", "repo": "requarks/wiki",
"version": "v2.5.312", "version": "v2.5.311",
"pinned": false, "pinned": false,
"date": "2026-02-12T02:45:22Z" "date": "2026-01-08T09:50:00Z"
}, },
{ {
"slug": "wishlist", "slug": "wishlist",
@@ -1712,9 +1642,9 @@
{ {
"slug": "zipline", "slug": "zipline",
"repo": "diced/zipline", "repo": "diced/zipline",
"version": "v4.4.2", "version": "v4.4.1",
"pinned": false, "pinned": false,
"date": "2026-02-11T04:58:54Z" "date": "2026-01-20T01:29:01Z"
}, },
{ {
"slug": "zitadel", "slug": "zitadel",

View File

@@ -21,7 +21,7 @@
"resources": { "resources": {
"cpu": 1, "cpu": 1,
"ram": 512, "ram": 512,
"hdd": 4, "hdd": 2,
"os": "debian", "os": "debian",
"version": "13" "version": "13"
} }
@@ -32,7 +32,7 @@
"resources": { "resources": {
"cpu": 1, "cpu": 1,
"ram": 256, "ram": 256,
"hdd": 2, "hdd": 1,
"os": "alpine", "os": "alpine",
"version": "3.23" "version": "3.23"
} }

View File

@@ -33,7 +33,7 @@
}, },
"notes": [ "notes": [
{ {
"text": "Kutt needs so be served with an SSL certificate for its login to work. During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse proxy to port 3000).", "text": "Kutt needs so be served with an SSL certificate for its login to work. During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse porxy to port 3000).",
"type": "info" "type": "info"
} }
] ]

View File

@@ -12,7 +12,7 @@
"documentation": "https://radicale.org/master.html#documentation-1", "documentation": "https://radicale.org/master.html#documentation-1",
"website": "https://radicale.org/", "website": "https://radicale.org/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp",
"config_path": "/etc/radicale/config", "config_path": "/etc/radicale/config or ~/.config/radicale/config",
"description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)", "description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)",
"install_methods": [ "install_methods": [
{ {

View File

@@ -1,5 +1,5 @@
{ {
"name": "Slskd", "name": "slskd",
"slug": "slskd", "slug": "slskd",
"categories": [ "categories": [
11 11
@@ -35,6 +35,10 @@
{ {
"text": "See /opt/slskd/config/slskd.yml to add your Soulseek credentials", "text": "See /opt/slskd/config/slskd.yml to add your Soulseek credentials",
"type": "info" "type": "info"
},
{
"text": "This LXC includes Soularr; it needs to be configured (/opt/soularr/config.ini) before it will work",
"type": "info"
} }
] ]
} }

View File

@@ -10,7 +10,7 @@
"privileged": false, "privileged": false,
"interface_port": 3000, "interface_port": 3000,
"documentation": "https://github.com/TuroYT/snowshare", "documentation": "https://github.com/TuroYT/snowshare",
"config_path": "/opt/snowshare.env", "config_path": "/opt/snowshare/.env",
"website": "https://github.com/TuroYT/snowshare", "website": "https://github.com/TuroYT/snowshare",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/snowshare.png", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/snowshare.png",
"description": "A modern, secure file and link sharing platform built with Next.js, Prisma, and NextAuth. Share URLs, code snippets, and files with customizable expiration, privacy, and QR codes.", "description": "A modern, secure file and link sharing platform built with Next.js, Prisma, and NextAuth. Share URLs, code snippets, and files with customizable expiration, privacy, and QR codes.",

View File

@@ -32,10 +32,6 @@
"password": null "password": null
}, },
"notes": [ "notes": [
{
"text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.",
"type": "warning"
},
{ {
"text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.", "text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.",
"type": "info" "type": "info"

View File

@@ -16,8 +16,7 @@ update_os
msg_info "Installing Dependencies" msg_info "Installing Dependencies"
$STD apt install -y \ $STD apt install -y \
python3-pip \ python3-pip \
python3-libtorrent \ python3-libtorrent
python3-setuptools
msg_ok "Installed Dependencies" msg_ok "Installed Dependencies"
msg_info "Installing Deluge" msg_info "Installing Deluge"

View File

@@ -37,7 +37,7 @@ fetch_and_deploy_gh_release "dispatcharr" "Dispatcharr/Dispatcharr" "tarball"
msg_info "Installing Python Dependencies with uv" msg_info "Installing Python Dependencies with uv"
cd /opt/dispatcharr cd /opt/dispatcharr
$STD uv venv --clear $STD uv venv --clear
$STD uv sync $STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
$STD uv pip install gunicorn gevent celery redis daphne $STD uv pip install gunicorn gevent celery redis daphne
msg_ok "Installed Python Dependencies" msg_ok "Installed Python Dependencies"

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.drawio.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
setup_hwaccel
msg_info "Installing Dependencies"
$STD apt install -y tomcat11
msg_ok "Installed Dependencies"
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
motd_ssh
customize
cleanup_lxc

View File

@@ -38,18 +38,6 @@ rm -f "$DEB_FILE"
echo "$LATEST_VERSION" >~/.emqx echo "$LATEST_VERSION" >~/.emqx
msg_ok "Installed EMQX" msg_ok "Installed EMQX"
read -r -p "${TAB3}Would you like to disable the EMQX MQ feature? (reduces disk/CPU usage) <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Disabling EMQX MQ feature"
mkdir -p /etc/emqx
if ! grep -q "^mq.enable" /etc/emqx/emqx.conf 2>/dev/null; then
echo "mq.enable = false" >>/etc/emqx/emqx.conf
else
sed -i 's/^mq.enable.*/mq.enable = false/' /etc/emqx/emqx.conf
fi
msg_ok "Disabled EMQX MQ feature"
fi
msg_info "Starting EMQX service" msg_info "Starting EMQX service"
$STD systemctl enable -q --now emqx $STD systemctl enable -q --now emqx
msg_ok "Enabled EMQX service" msg_ok "Enabled EMQX service"

View File

@@ -289,7 +289,7 @@ ML_DIR="${APP_DIR}/machine-learning"
GEO_DIR="${INSTALL_DIR}/geodata" GEO_DIR="${INSTALL_DIR}/geodata"
mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache} mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache}
fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.6" "$SRC_DIR" fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.5" "$SRC_DIR"
PNPM_VERSION="$(jq -r '.packageManager | split("@")[1]' ${SRC_DIR}/package.json)" PNPM_VERSION="$(jq -r '.packageManager | split("@")[1]' ${SRC_DIR}/package.json)"
NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs

View File

@@ -20,19 +20,10 @@ msg_ok "Installed Docker"
msg_info "Detecting latest Kasm Workspaces release" msg_info "Detecting latest Kasm Workspaces release"
KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1) KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1)
if [[ -z "$KASM_URL" ]]; then if [[ -z "$KASM_URL" ]]; then
SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1)
if [[ -n "$SERVICE_IMAGE_URL" ]]; then
KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz"
fi
else
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
fi
if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
msg_error "Unable to detect latest Kasm release URL." msg_error "Unable to detect latest Kasm release URL."
exit 1 exit 1
fi fi
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
msg_ok "Detected Kasm Workspaces version $KASM_VERSION" msg_ok "Detected Kasm Workspaces version $KASM_VERSION"
msg_warn "WARNING: This script will run an external installer from a third-party source (https://www.kasmweb.com/)." msg_warn "WARNING: This script will run an external installer from a third-party source (https://www.kasmweb.com/)."

View File

@@ -37,13 +37,18 @@ PYTHON_VERSION="3.12" setup_uv
fetch_and_deploy_gh_release "libretranslate" "LibreTranslate/LibreTranslate" "tarball" fetch_and_deploy_gh_release "libretranslate" "LibreTranslate/LibreTranslate" "tarball"
msg_info "Setup LibreTranslate (Patience)" msg_info "Setup LibreTranslate (Patience)"
TORCH_VERSION=$(grep -Eo '"torch ==[0-9]+\.[0-9]+\.[0-9]+' /opt/libretranslate/pyproject.toml |
tail -n1 | sed 's/.*==//')
if [[ -z "$TORCH_VERSION" ]]; then
TORCH_VERSION="2.5.0"
fi
cd /opt/libretranslate cd /opt/libretranslate
$STD uv venv --clear .venv --python 3.12 $STD uv venv --clear .venv --python 3.12
$STD source .venv/bin/activate $STD source .venv/bin/activate
$STD uv pip install --upgrade pip $STD uv pip install --upgrade pip setuptools
$STD uv pip install "setuptools<81"
$STD uv pip install Babel==2.12.1 $STD uv pip install Babel==2.12.1
$STD .venv/bin/python scripts/compile_locales.py $STD .venv/bin/python scripts/compile_locales.py
$STD uv pip install "torch==${TORCH_VERSION}" --extra-index-url https://download.pytorch.org/whl/cpu
$STD uv pip install "numpy<2" $STD uv pip install "numpy<2"
$STD uv pip install . $STD uv pip install .
$STD uv pip install libretranslate $STD uv pip install libretranslate

View File

@@ -38,10 +38,6 @@ for server in "${servers[@]}"; do
fi fi
done done
msg_info "Installing dependencies"
$STD apt install -y inotify-tools
msg_ok "Installed dependencies"
msg_info "Installing Collabora Online" msg_info "Installing Collabora Online"
curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg
cat <<EOF >/etc/apt/sources.list.d/colloboraonline.sources cat <<EOF >/etc/apt/sources.list.d/colloboraonline.sources
@@ -152,15 +148,8 @@ COLLABORATION_JWT_SECRET=
# FRONTEND_FULL_TEXT_SEARCH_ENABLED=true # FRONTEND_FULL_TEXT_SEARCH_ENABLED=true
# SEARCH_EXTRACTOR_TIKA_TIKA_URL=<your-tika-url> # SEARCH_EXTRACTOR_TIKA_TIKA_URL=<your-tika-url>
## Uncomment below to enable PosixFS Collaborative Mode ## External storage test - Only NFS v4.2+ is supported
## Increase inotify watch/instance limits on your PVE host: ## User files
### sysctl -w fs.inotify.max_user_watches=1048576
### sysctl -w fs.inotify.max_user_instances=1024
# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true
# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait
# STORAGE_USERS_POSIX_WATCH_FS=true
# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>
## User files location - experimental - use at your own risk! - ZFS, NFS v4.2+ supported - CIFS/SMB not supported
# STORAGE_USERS_POSIX_ROOT=<path-to-your-bind_mount> # STORAGE_USERS_POSIX_ROOT=<path-to-your-bind_mount>
EOF EOF

View File

@@ -24,7 +24,7 @@ setup_hwaccel
PYTHON_VERSION="3.12" setup_uv PYTHON_VERSION="3.12" setup_uv
msg_info "Installing Open WebUI" msg_info "Installing Open WebUI"
$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all] $STD uv tool install --python 3.12 open-webui[all]
msg_ok "Installed Open WebUI" msg_ok "Installed Open WebUI"
read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt

View File

@@ -178,7 +178,7 @@ http:
servers: servers:
- url: "http://$LOCAL_IP:3000" - url: "http://$LOCAL_IP:3000"
EOF EOF
$STD npm run db:push $STD npm run db:sqlite:push
. /etc/os-release . /etc/os-release
if [ "$VERSION_CODENAME" = "trixie" ]; then if [ "$VERSION_CODENAME" = "trixie" ]; then

View File

@@ -14,51 +14,42 @@ network_check
update_os update_os
msg_info "Installing Dependencies" msg_info "Installing Dependencies"
$STD apt install -y apache2-utils $STD apt install -y \
apache2-utils \
python3-pip \
python3-venv
msg_ok "Installed Dependencies" msg_ok "Installed Dependencies"
PYTHON_VERSION="3.13" setup_uv
fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
msg_info "Setting up Radicale" msg_info "Setting up Radicale"
cd /opt/radicale python3 -m venv /opt/radicale
source /opt/radicale/bin/activate
$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
$STD htpasswd -c -b -5 /opt/radicale/users admin "$RNDPASS" $STD htpasswd -c -b -5 /opt/radicale/users admin $RNDPASS
{ {
echo "Radicale Credentials" echo "Radicale Credentials"
echo "Admin User: admin" echo "Admin User: admin"
echo "Admin Password: $RNDPASS" echo "Admin Password: $RNDPASS"
} >>~/radicale.creds } >>~/radicale.creds
msg_ok "Done setting up Radicale"
mkdir -p /etc/radicale msg_info "Setup Service"
cat <<EOF >/etc/radicale/config
[server]
hosts = 0.0.0.0:5232
[auth] cat <<EOF >/opt/radicale/start.sh
type = htpasswd #!/usr/bin/env bash
htpasswd_filename = /opt/radicale/users source /opt/radicale/bin/activate
htpasswd_encryption = sha512 python3 -m radicale --storage-filesystem-folder=/var/lib/radicale/collections --hosts 0.0.0.0:5232 --auth-type htpasswd --auth-htpasswd-filename /opt/radicale/users --auth-htpasswd-encryption sha512
[storage]
type = multifilesystem
filesystem_folder = /var/lib/radicale/collections
[web]
type = internal
EOF EOF
msg_ok "Set up Radicale"
msg_info "Creating Service" chmod +x /opt/radicale/start.sh
cat <<EOF >/etc/systemd/system/radicale.service cat <<EOF >/etc/systemd/system/radicale.service
[Unit]
Description=A simple CalDAV (calendar) and CardDAV (contact) server Description=A simple CalDAV (calendar) and CardDAV (contact) server
After=network.target After=network.target
Requires=network.target Requires=network.target
[Service] [Service]
WorkingDirectory=/opt/radicale ExecStart=/opt/radicale/start.sh
ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config
Restart=on-failure Restart=on-failure
# User=radicale # User=radicale
# Deny other users access to the calendar data # Deny other users access to the calendar data

View File

@@ -3,7 +3,7 @@
# Copyright (c) 2021-2026 community-scripts ORG # Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream # Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/slskd/slskd/, https://github.com/mrusse/soularr # Source: https://github.com/slskd/slskd/, https://soularr.net
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color color
@@ -13,71 +13,71 @@ setting_up_container
network_check network_check
update_os update_os
fetch_and_deploy_gh_release "Slskd" "slskd/slskd" "prebuild" "latest" "/opt/slskd" "slskd-*-linux-x64.zip" msg_info "Installing Dependencies"
$STD apt install -y \
python3-pip
msg_ok "Installed Dependencies"
msg_info "Configuring Slskd" msg_info "Setup ${APPLICATION}"
tmp_file=$(mktemp)
RELEASE=$(curl -s https://api.github.com/repos/slskd/slskd/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
curl -fsSL "https://github.com/slskd/slskd/releases/download/${RELEASE}/slskd-${RELEASE}-linux-x64.zip" -o $tmp_file
$STD unzip $tmp_file -d /opt/${APPLICATION}
echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
JWT_KEY=$(openssl rand -base64 44) JWT_KEY=$(openssl rand -base64 44)
SLSKD_API_KEY=$(openssl rand -base64 44) SLSKD_API_KEY=$(openssl rand -base64 44)
cp /opt/slskd/config/slskd.example.yml /opt/slskd/config/slskd.yml cp /opt/${APPLICATION}/config/slskd.example.yml /opt/${APPLICATION}/config/slskd.yml
sed -i \ sed -i \
-e '/web:/,/cidr/s/^# //' \ -e "\|web:|,\|cidr|s|^#||" \
-e '/https:/,/port: 5031/s/false/true/' \ -e "\|https:|,\|5031|s|false|true|" \
-e '/port: 5030/,/socket/s/,.*$//' \
-e '/content_path:/,/authentication/s/false/true/' \
-e "\|api_keys|,\|cidr|s|<some.*$|$SLSKD_API_KEY|; \ -e "\|api_keys|,\|cidr|s|<some.*$|$SLSKD_API_KEY|; \
s|role: readonly|role: readwrite|; \ s|role: readonly|role: readwrite|; \
s|0.0.0.0/0,::/0|& # Replace this with your subnet|" \ s|0.0.0.0/0,::/0|& # Replace this with your subnet|" \
-e "\|soulseek|,\|write_queue|s|^#||" \
-e "\|jwt:|,\|ttl|s|key: ~|key: $JWT_KEY|" \ -e "\|jwt:|,\|ttl|s|key: ~|key: $JWT_KEY|" \
-e '/soulseek/,/write_queue/s/^# //' \ -e "s|^ picture|# picture|" \
-e 's/^.*picture/#&/' /opt/slskd/config/slskd.yml /opt/${APPLICATION}/config/slskd.yml
msg_ok "Configured Slskd" msg_ok "Setup ${APPLICATION}"
read -rp "${TAB3}Do you want to install Soularr? y/N " soularr msg_info "Installing Soularr"
if [[ ${soularr,,} =~ ^(y|yes)$ ]]; then rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
PYTHON_VERSION="3.11" setup_uv cd /tmp
fetch_and_deploy_gh_release "Soularr" "mrusse/soularr" "tarball" "latest" "/opt/soularr" curl -fsSL -o main.zip https://github.com/mrusse/soularr/archive/refs/heads/main.zip
cd /opt/soularr $STD unzip main.zip
$STD uv venv venv mv soularr-main /opt/soularr
$STD source venv/bin/activate cd /opt/soularr
$STD uv pip install -r requirements.txt $STD pip install -r requirements.txt
sed -i \ sed -i \
-e "\|[Slskd]|,\|host_url|s|yourslskdapikeygoeshere|$SLSKD_API_KEY|" \ -e "\|[Slskd]|,\|host_url|s|yourslskdapikeygoeshere|$SLSKD_API_KEY|" \
-e "/host_url/s/slskd/localhost/" \ -e "/host_url/s/slskd/localhost/" \
/opt/soularr/config.ini /opt/soularr/config.ini
cat <<EOF >/opt/soularr/run.sh sed -i \
#!/usr/bin/env bash -e "/#This\|#Default\|INTERVAL/{N;d;}" \
-e "/while\|#Pass/d" \
-e "\|python|s|app|opt/soularr|; s|python|python3|" \
-e "/dt/,+2d" \
/opt/soularr/run.sh
sed -i -E "/(soularr.py)/s/.{5}$//; /if/,/fi/s/.{4}//" /opt/soularr/run.sh
chmod +x /opt/soularr/run.sh
msg_ok "Installed Soularr"
if ps aux | grep "[s]oularr.py" >/dev/null; then msg_info "Creating Services"
echo "Soularr is already running. Exiting..." cat <<EOF >/etc/systemd/system/${APPLICATION}.service
exit 1
else
source /opt/soularr/venv/bin/activate
uv run python3 -u /opt/soularr/soularr.py --config-dir /opt/soularr
fi
EOF
chmod +x /opt/soularr/run.sh
deactivate
msg_ok "Installed Soularr"
fi
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/slskd.service
[Unit] [Unit]
Description=Slskd Service Description=${APPLICATION} Service
After=network.target After=network.target
Wants=network.target Wants=network.target
[Service] [Service]
WorkingDirectory=/opt/slskd WorkingDirectory=/opt/${APPLICATION}
ExecStart=/opt/slskd/slskd --config /opt/slskd/config/slskd.yml ExecStart=/opt/${APPLICATION}/slskd --config /opt/${APPLICATION}/config/slskd.yml
Restart=always Restart=always
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target
EOF EOF
if [[ -d /opt/soularr ]]; then cat <<EOF >/etc/systemd/system/soularr.timer
cat <<EOF >/etc/systemd/system/soularr.timer
[Unit] [Unit]
Description=Soularr service timer Description=Soularr service timer
RefuseManualStart=no RefuseManualStart=no
@@ -85,15 +85,15 @@ RefuseManualStop=no
[Timer] [Timer]
Persistent=true Persistent=true
# run every 10 minutes # run every 5 minutes
OnCalendar=*-*-* *:0/10:00 OnCalendar=*-*-* *:0/5:00
Unit=soularr.service Unit=soularr.service
[Install] [Install]
WantedBy=timers.target WantedBy=timers.target
EOF EOF
cat <<EOF >/etc/systemd/system/soularr.service cat <<EOF >/etc/systemd/system/soularr.service
[Unit] [Unit]
Description=Soularr service Description=Soularr service
After=network.target slskd.service After=network.target slskd.service
@@ -106,9 +106,10 @@ ExecStart=/bin/bash -c /opt/soularr/run.sh
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target
EOF EOF
msg_warn "Add your Lidarr API key to Soularr in '/opt/soularr/config.ini', then run 'systemctl enable --now soularr.timer'" systemctl enable -q --now ${APPLICATION}
fi systemctl enable -q soularr.timer
systemctl enable -q --now slskd rm -rf $tmp_file
rm -rf /tmp/main.zip
msg_ok "Created Services" msg_ok "Created Services"
motd_ssh motd_ssh

View File

@@ -15,18 +15,16 @@ update_os
msg_info "Installing Dependencies" msg_info "Installing Dependencies"
$STD apt install -y apt-transport-https $STD apt install -y apt-transport-https
curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
Types: deb
URIs: https://www.ui.com/downloads/unifi/debian
Suites: stable
Components: ubiquiti
Architectures: amd64
Signed-By: /usr/share/keyrings/unifi-repo.gpg
EOF
$STD apt update
msg_ok "Installed Dependencies" msg_ok "Installed Dependencies"
setup_deb822_repo \
"unifi" \
"https://dl.ui.com/unifi/unifi-repo.gpg" \
"https://www.ui.com/downloads/unifi/debian" \
"stable" \
"ubiquiti" \
"amd64"
JAVA_VERSION="21" setup_java JAVA_VERSION="21" setup_java
if lscpu | grep -q 'avx'; then if lscpu | grep -q 'avx'; then

File diff suppressed because it is too large Load Diff

View File

@@ -3078,10 +3078,10 @@ settings_menu() {
case "$choice" in case "$choice" in
1) diagnostics_menu ;; 1) diagnostics_menu ;;
2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;; 2) nano /usr/local/community-scripts/default.vars ;;
3) 3)
if [ -f "$(get_app_defaults_path)" ]; then if [ -f "$(get_app_defaults_path)" ]; then
${EDITOR:-nano} "$(get_app_defaults_path)" nano "$(get_app_defaults_path)"
else else
# Back was selected (no app.vars available) # Back was selected (no app.vars available)
return return
@@ -3636,9 +3636,6 @@ $PCT_OPTIONS_STRING"
exit 214 exit 214
fi fi
msg_ok "Storage space validated" msg_ok "Storage space validated"
# Report installation start to API (early - captures failed installs too)
post_to_api
fi fi
create_lxc_container || exit $? create_lxc_container || exit $?
@@ -4013,9 +4010,6 @@ EOF'
# Install SSH keys # Install SSH keys
install_ssh_keys_into_ct install_ssh_keys_into_ct
# Start timer for duration tracking
start_install_timer
# Run application installer # Run application installer
# Disable error trap - container errors are handled internally via flag file # Disable error trap - container errors are handled internally via flag file
set +Eeuo pipefail # Disable ALL error handling temporarily set +Eeuo pipefail # Disable ALL error handling temporarily
@@ -4046,10 +4040,9 @@ EOF'
if [[ $install_exit_code -ne 0 ]]; then if [[ $install_exit_code -ne 0 ]]; then
msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})" msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"
# Copy install log from container BEFORE API call so get_error_text() can read it # Copy both logs from container before potential deletion
local build_log_copied=false local build_log_copied=false
local install_log_copied=false local install_log_copied=false
local host_install_log="/tmp/install-lxc-${CTID}-${SESSION_ID}.log"
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
# Copy BUILD_LOG (creation log) if it exists # Copy BUILD_LOG (creation log) if it exists
@@ -4057,22 +4050,15 @@ EOF'
cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
fi fi
# Copy INSTALL_LOG from container to host # Copy INSTALL_LOG from container
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$host_install_log" 2>/dev/null; then if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
install_log_copied=true install_log_copied=true
# Point INSTALL_LOG to host copy so get_error_text() finds it
INSTALL_LOG="$host_install_log"
fi fi
fi
# Report failure to telemetry API (now with log available on host) # Show available logs
post_update_to_api "failed" "$install_exit_code"
# Show available logs
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
echo "" echo ""
[[ "$build_log_copied" == true ]] && echo -e "${GN}${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}" [[ "$build_log_copied" == true ]] && echo -e "${GN}${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
[[ "$install_log_copied" == true ]] && echo -e "${GN}${CL} Installation log: ${BL}${host_install_log}${CL}" [[ "$install_log_copied" == true ]] && echo -e "${GN}${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
fi fi
# Dev mode: Keep container or open breakpoint shell # Dev mode: Keep container or open breakpoint shell
@@ -4130,10 +4116,6 @@ EOF'
echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}"
fi fi
# Force one final status update attempt after cleanup
# This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
post_update_to_api "failed" "$install_exit_code" "force"
exit $install_exit_code exit $install_exit_code
fi fi
} }
@@ -5141,9 +5123,9 @@ EOF
# api_exit_script() # api_exit_script()
# #
# - Exit trap handler for reporting to API telemetry # - Exit trap handler for reporting to API telemetry
# - Captures exit code and reports to PocketBase using centralized error descriptions # - Captures exit code and reports to API using centralized error descriptions
# - Uses explain_exit_code() from api.func for consistent error messages # - Uses explain_exit_code() from error_handler.func for consistent error messages
# - Posts failure status with exit code to API (error description resolved automatically) # - Posts failure status with exit code to API (error description added automatically)
# - Only executes on non-zero exit codes # - Only executes on non-zero exit codes
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
api_exit_script() { api_exit_script() {
@@ -5156,6 +5138,6 @@ api_exit_script() {
if command -v pveversion >/dev/null 2>&1; then if command -v pveversion >/dev/null 2>&1; then
trap 'api_exit_script' EXIT trap 'api_exit_script' EXIT
fi fi
trap 'post_update_to_api "failed" "$?"' ERR trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
trap 'post_update_to_api "failed" "130"' SIGINT trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "143"' SIGTERM trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

View File

@@ -27,90 +27,100 @@
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# explain_exit_code() # explain_exit_code()
# #
# - Canonical version is defined in api.func (sourced before this file) # - Maps numeric exit codes to human-readable error descriptions
# - This section only provides a fallback if api.func was not loaded # - Supports:
# - See api.func SECTION 1 for the authoritative exit code mappings # * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
# * Package manager errors (APT, DPKG: 100, 101, 255)
# * Node.js/npm errors (243-249, 254)
# * Python/pip/uv errors (210-212)
# * PostgreSQL errors (231-234)
# * MySQL/MariaDB errors (241-244)
# * MongoDB errors (251-254)
# * Proxmox custom codes (200-231)
# - Returns description string for given exit code
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
if ! declare -f explain_exit_code &>/dev/null; then explain_exit_code() {
explain_exit_code() { local code="$1"
local code="$1" case "$code" in
case "$code" in # --- Generic / Shell ---
1) echo "General error / Operation not permitted" ;; 1) echo "General error / Operation not permitted" ;;
2) echo "Misuse of shell builtins (e.g. syntax error)" ;; 2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
6) echo "curl: DNS resolution failed (could not resolve host)" ;; 126) echo "Command invoked cannot execute (permission problem?)" ;;
7) echo "curl: Failed to connect (network unreachable / host down)" ;; 127) echo "Command not found" ;;
22) echo "curl: HTTP error returned (404, 429, 500+)" ;; 128) echo "Invalid argument to exit" ;;
28) echo "curl: Operation timeout (network slow or server not responding)" ;; 130) echo "Terminated by Ctrl+C (SIGINT)" ;;
35) echo "curl: SSL/TLS handshake failed (certificate error)" ;; 137) echo "Killed (SIGKILL / Out of memory?)" ;;
100) echo "APT: Package manager error (broken packages / dependency problems)" ;; 139) echo "Segmentation fault (core dumped)" ;;
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;; 143) echo "Terminated (SIGTERM)" ;;
102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
124) echo "Command timed out (timeout command)" ;; # --- Package manager / APT / DPKG ---
126) echo "Command invoked cannot execute (permission problem?)" ;; 100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
127) echo "Command not found" ;; 101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
128) echo "Invalid argument to exit" ;; 255) echo "DPKG: Fatal internal error" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;; # --- Node.js / npm / pnpm / yarn ---
137) echo "Killed (SIGKILL / Out of memory?)" ;; 243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
139) echo "Segmentation fault (core dumped)" ;; 245) echo "Node.js: Invalid command-line option" ;;
141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;; 246) echo "Node.js: Internal JavaScript Parse Error" ;;
143) echo "Terminated (SIGTERM)" ;; 247) echo "Node.js: Fatal internal error" ;;
150) echo "Systemd: Service failed to start" ;; 248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
151) echo "Systemd: Service unit not found" ;; 249) echo "Node.js: Inspector error" ;;
152) echo "Permission denied (EACCES)" ;; 254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
153) echo "Build/compile failed (make/gcc/cmake)" ;;
154) echo "Node.js: Native addon build failed (node-gyp)" ;; # --- Python / pip / uv ---
160) echo "Python: Virtualenv / uv environment missing or broken" ;; 210) echo "Python: Virtualenv / uv environment missing or broken" ;;
161) echo "Python: Dependency resolution failed" ;; 211) echo "Python: Dependency resolution failed" ;;
162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;; 212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
171) echo "PostgreSQL: Authentication failed (bad user/password)" ;; # --- PostgreSQL ---
172) echo "PostgreSQL: Database does not exist" ;; 231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
173) echo "PostgreSQL: Fatal error in query / syntax" ;; 232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; 233) echo "PostgreSQL: Database does not exist" ;;
181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; 234) echo "PostgreSQL: Fatal error in query / syntax" ;;
182) echo "MySQL/MariaDB: Database does not exist" ;;
183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; # --- MySQL / MariaDB ---
190) echo "MongoDB: Connection failed (server not running)" ;; 241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
191) echo "MongoDB: Authentication failed (bad user/password)" ;; 242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
192) echo "MongoDB: Database not found" ;; 243) echo "MySQL/MariaDB: Database does not exist" ;;
193) echo "MongoDB: Fatal query error" ;; 244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
200) echo "Proxmox: Failed to create lock file" ;;
203) echo "Proxmox: Missing CTID variable" ;; # --- MongoDB ---
204) echo "Proxmox: Missing PCT_OSTYPE variable" ;; 251) echo "MongoDB: Connection failed (server not running)" ;;
205) echo "Proxmox: Invalid CTID (<100)" ;; 252) echo "MongoDB: Authentication failed (bad user/password)" ;;
206) echo "Proxmox: CTID already in use" ;; 253) echo "MongoDB: Database not found" ;;
207) echo "Proxmox: Password contains unescaped special characters" ;; 254) echo "MongoDB: Fatal query error" ;;
208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
209) echo "Proxmox: Container creation failed" ;; # --- Proxmox Custom Codes ---
210) echo "Proxmox: Cluster not quorate" ;; 200) echo "Proxmox: Failed to create lock file" ;;
211) echo "Proxmox: Timeout waiting for template lock" ;; 203) echo "Proxmox: Missing CTID variable" ;;
212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;; 204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
213) echo "Proxmox: Storage type does not support 'rootdir' content" ;; 205) echo "Proxmox: Invalid CTID (<100)" ;;
214) echo "Proxmox: Not enough storage space" ;; 206) echo "Proxmox: CTID already in use" ;;
215) echo "Proxmox: Container created but not listed (ghost state)" ;; 207) echo "Proxmox: Password contains unescaped special characters" ;;
216) echo "Proxmox: RootFS entry missing in config" ;; 208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
217) echo "Proxmox: Storage not accessible" ;; 209) echo "Proxmox: Container creation failed" ;;
218) echo "Proxmox: Template file corrupted or incomplete" ;; 210) echo "Proxmox: Cluster not quorate" ;;
219) echo "Proxmox: CephFS does not support containers - use RBD" ;; 211) echo "Proxmox: Timeout waiting for template lock" ;;
220) echo "Proxmox: Unable to resolve template path" ;; 212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
221) echo "Proxmox: Template file not readable" ;; 213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
222) echo "Proxmox: Template download failed" ;; 214) echo "Proxmox: Not enough storage space" ;;
223) echo "Proxmox: Template not available after download" ;; 215) echo "Proxmox: Container created but not listed (ghost state)" ;;
224) echo "Proxmox: PBS storage is for backups only" ;; 216) echo "Proxmox: RootFS entry missing in config" ;;
225) echo "Proxmox: No template available for OS/Version" ;; 217) echo "Proxmox: Storage not accessible" ;;
231) echo "Proxmox: LXC stack upgrade failed" ;; 219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;; 224) echo "Proxmox: PBS storage is for backups only" ;;
245) echo "Node.js: Invalid command-line option" ;; 218) echo "Proxmox: Template file corrupted or incomplete" ;;
246) echo "Node.js: Internal JavaScript Parse Error" ;; 220) echo "Proxmox: Unable to resolve template path" ;;
247) echo "Node.js: Fatal internal error" ;; 221) echo "Proxmox: Template file not readable" ;;
248) echo "Node.js: Invalid C++ addon / N-API failure" ;; 222) echo "Proxmox: Template download failed" ;;
249) echo "npm/pnpm/yarn: Unknown fatal error" ;; 223) echo "Proxmox: Template not available after download" ;;
255) echo "DPKG: Fatal internal error" ;; 225) echo "Proxmox: No template available for OS/Version" ;;
*) echo "Unknown error" ;; 231) echo "Proxmox: LXC stack upgrade failed" ;;
esac
} # --- Default ---
fi *) echo "Unknown error" ;;
esac
}
# ============================================================================== # ==============================================================================
# SECTION 2: ERROR HANDLERS # SECTION 2: ERROR HANDLERS
@@ -187,7 +197,12 @@ error_handler() {
# Create error flag file with exit code for host detection # Create error flag file with exit code for host detection
echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
# Log path is shown by host as combined log - no need to show container path
if declare -f msg_custom >/dev/null 2>&1; then
msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
else
echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
fi
else else
# HOST CONTEXT: Show local log path and offer container cleanup # HOST CONTEXT: Show local log path and offer container cleanup
if declare -f msg_custom >/dev/null 2>&1; then if declare -f msg_custom >/dev/null 2>&1; then
@@ -198,11 +213,6 @@ error_handler() {
# Offer to remove container if it exists (build errors after container creation) # Offer to remove container if it exists (build errors after container creation)
if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then
# Report failure to API before container cleanup
if declare -f post_update_to_api &>/dev/null; then
post_update_to_api "failed" "$exit_code"
fi
echo "" echo ""
echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}" echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
@@ -222,12 +232,6 @@ error_handler() {
pct destroy "$CTID" &>/dev/null || true pct destroy "$CTID" &>/dev/null || true
echo -e "${GN}${CL} Container ${CTID} removed" echo -e "${GN}${CL} Container ${CTID} removed"
fi fi
# Force one final status update attempt after cleanup
# This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
if declare -f post_update_to_api &>/dev/null; then
post_update_to_api "failed" "$exit_code" "force"
fi
fi fi
fi fi
fi fi
@@ -249,18 +253,6 @@ error_handler() {
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
on_exit() { on_exit() {
local exit_code=$? local exit_code=$?
# Report orphaned "installing" records to telemetry API
# Catches ALL exit paths: errors (non-zero), signals, AND clean exits where
# post_to_api was called ("installing" sent) but post_update_to_api was never called
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
if declare -f post_update_to_api >/dev/null 2>&1; then
if [[ $exit_code -ne 0 ]]; then
post_update_to_api "failed" "$exit_code"
else
post_update_to_api "failed" "1"
fi
fi
fi
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile" [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code" exit "$exit_code"
} }
@@ -273,10 +265,6 @@ on_exit() {
# - Exits with code 130 (128 + SIGINT=2) # - Exits with code 130 (128 + SIGINT=2)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
on_interrupt() { on_interrupt() {
# Report interruption to telemetry API (prevents stuck "installing" records)
if declare -f post_update_to_api >/dev/null 2>&1; then
post_update_to_api "failed" "130"
fi
if declare -f msg_error >/dev/null 2>&1; then if declare -f msg_error >/dev/null 2>&1; then
msg_error "Interrupted by user (SIGINT)" msg_error "Interrupted by user (SIGINT)"
else else
@@ -294,10 +282,6 @@ on_interrupt() {
# - Triggered by external process termination # - Triggered by external process termination
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
on_terminate() { on_terminate() {
# Report termination to telemetry API (prevents stuck "installing" records)
if declare -f post_update_to_api >/dev/null 2>&1; then
post_update_to_api "failed" "143"
fi
if declare -f msg_error >/dev/null 2>&1; then if declare -f msg_error >/dev/null 2>&1; then
msg_error "Terminated by signal (SIGTERM)" msg_error "Terminated by signal (SIGTERM)"
else else

View File

@@ -465,7 +465,6 @@ manage_tool_repository() {
msg_error "Failed to download MongoDB GPG key" msg_error "Failed to download MongoDB GPG key"
return 1 return 1
fi fi
chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"
# Setup repository # Setup repository
local distro_codename local distro_codename
@@ -1295,33 +1294,12 @@ setup_deb822_repo() {
return 1 return 1
} }
# Import GPG key (auto-detect binary vs ASCII-armored format) # Import GPG
local tmp_gpg curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
tmp_gpg=$(mktemp) || return 1 msg_error "Failed to import GPG key for ${name}"
curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
msg_error "Failed to download GPG key for ${name}"
rm -f "$tmp_gpg"
return 1 return 1
} }
if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
# ASCII-armored — dearmor to binary
gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" < "$tmp_gpg" || {
msg_error "Failed to dearmor GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
else
# Already in binary GPG format — copy directly
cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
msg_error "Failed to install GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
fi
rm -f "$tmp_gpg"
chmod 644 "/etc/apt/keyrings/${name}.gpg"
# Write deb822 # Write deb822
{ {
echo "Types: deb" echo "Types: deb"

View File

@@ -75,37 +75,14 @@ pct exec "$CTID" -- bash -c '
set -e set -e
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
# Source os-release properly (handles quoted values) ID=$(grep "^ID=" /etc/os-release | cut -d"=" -f2)
source /etc/os-release VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2)
# Fallback if DNS is poisoned or blocked # fallback if DNS is poisoned or blocked
ORIG_RESOLV="/etc/resolv.conf" ORIG_RESOLV="/etc/resolv.conf"
BACKUP_RESOLV="/tmp/resolv.conf.backup" BACKUP_RESOLV="/tmp/resolv.conf.backup"
# Check DNS resolution using multiple methods (dig may not be installed) if ! dig +short pkgs.tailscale.com | grep -qvE "^127\.|^0\.0\.0\.0$"; then
dns_check_failed=true
if command -v dig &>/dev/null; then
if dig +short pkgs.tailscale.com 2>/dev/null | grep -qvE "^127\.|^0\.0\.0\.0$|^$"; then
dns_check_failed=false
fi
elif command -v host &>/dev/null; then
if host pkgs.tailscale.com 2>/dev/null | grep -q "has address"; then
dns_check_failed=false
fi
elif command -v nslookup &>/dev/null; then
if nslookup pkgs.tailscale.com 2>/dev/null | grep -q "Address:"; then
dns_check_failed=false
fi
elif command -v getent &>/dev/null; then
if getent hosts pkgs.tailscale.com &>/dev/null; then
dns_check_failed=false
fi
else
# No DNS tools available, try curl directly and assume DNS works
dns_check_failed=false
fi
if $dns_check_failed; then
echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)." echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)."
echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)" echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)"
cp "$ORIG_RESOLV" "$BACKUP_RESOLV" cp "$ORIG_RESOLV" "$BACKUP_RESOLV"
@@ -115,22 +92,17 @@ fi
if ! command -v curl &>/dev/null; then if ! command -v curl &>/dev/null; then
echo "[INFO] curl not found, installing..." echo "[INFO] curl not found, installing..."
apt-get update -qq apt-get update -qq
apt update -qq apt-get install -y curl >/dev/null
apt install -y curl >/dev/null
fi fi
# Ensure keyrings directory exists curl -fsSL https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg \
mkdir -p /usr/share/keyrings
curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VERSION_CODENAME}.noarmor.gpg" \
| tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null | tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VERSION_CODENAME} main" \ echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
>/etc/apt/sources.list.d/tailscale.list >/etc/apt/sources.list.d/tailscale.list
apt-get update -qq apt-get update -qq
apt update -qq apt-get install -y tailscale >/dev/null
apt install -y tailscale >/dev/null
if [[ -f /tmp/resolv.conf.backup ]]; then if [[ -f /tmp/resolv.conf.backup ]]; then
echo "[INFO] Restoring original /etc/resolv.conf" echo "[INFO] Restoring original /etc/resolv.conf"

View File

@@ -131,7 +131,7 @@ function detect_service() {
function backup_container() { function backup_container() {
msg_info "Creating backup for container $1" msg_info "Creating backup for container $1"
vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "{{guestname}} - community-scripts backup updater" >/dev/null 2>&1 vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "community-scripts backup updater" >/dev/null 2>&1
status=$? status=$?
if [ $status -eq 0 ]; then if [ $status -eq 0 ]; then
@@ -151,11 +151,11 @@ function get_backup_storages() {
split($0, a, ":") split($0, a, ":")
type = a[1] type = a[1]
name = a[2] name = a[2]
gsub(/^[ \t]+|[ \t]+$/, "", name) sub(/^ +/, "", name)
has_content = 0 has_content = 0
has_backup = 0 has_backup = 0
} }
/^[ \t]*content/ { /^ +content/ {
has_content = 1 has_content = 1
if ($0 ~ /backup/) has_backup = 1 if ($0 ~ /backup/) has_backup = 1
} }

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -203,6 +203,7 @@ function exit-script() {
function default_settings() { function default_settings() {
VMID=$(get_valid_nextid) VMID=$(get_valid_nextid)
FORMAT=",efitype=4m"
MACHINE="" MACHINE=""
DISK_SIZE="4G" DISK_SIZE="4G"
DISK_CACHE="" DISK_CACHE=""
@@ -258,9 +259,11 @@ function advanced_settings() {
3>&1 1>&2 2>&3); then 3>&1 1>&2 2>&3); then
if [ "$MACH" = q35 ]; then if [ "$MACH" = q35 ]; then
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}" echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
FORMAT=""
MACHINE=" -machine q35" MACHINE=" -machine q35"
else else
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}" echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
FORMAT=",efitype=4m"
MACHINE="" MACHINE=""
fi fi
else else
@@ -473,45 +476,31 @@ case $STORAGE_TYPE in
nfs | dir | cifs) nfs | dir | cifs)
DISK_EXT=".qcow2" DISK_EXT=".qcow2"
DISK_REF="$VMID/" DISK_REF="$VMID/"
DISK_IMPORT="--format qcow2" DISK_IMPORT="-format qcow2"
THIN="" THIN=""
;; ;;
btrfs) btrfs)
DISK_EXT=".raw" DISK_EXT=".raw"
DISK_REF="$VMID/" DISK_REF="$VMID/"
DISK_IMPORT="--format raw" DISK_IMPORT="-format raw"
FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="--format raw"
;;
esac esac
for i in {0,1}; do
disk="DISK$i"
eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
done
msg_info "Creating a Arch Linux VM" msg_info "Creating a Arch Linux VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \ qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
if qm disk import --help >/dev/null 2>&1; then qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
IMPORT_CMD=(qm disk import)
else
IMPORT_CMD=(qm importdisk)
fi
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
[[ -z "$DISK_REF_IMPORTED" ]] && {
msg_error "Unable to determine imported disk reference."
echo "$IMPORT_OUT"
exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
qm set $VMID \ qm set $VMID \
-efidisk0 ${STORAGE}:0,efitype=4m \ -efidisk0 ${DISK0_REF}${FORMAT} \
-scsi0 ${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,} \ -scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
-ide2 ${STORAGE}:cloudinit \ -ide2 ${STORAGE}:cloudinit \
-boot order=scsi0 \ -boot order=scsi0 \
-serial0 socket >/dev/null -serial0 socket >/dev/null

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -201,17 +201,6 @@ function exit-script() {
exit exit
} }
function select_cloud_init() {
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \
--yesno "Enable Cloud-Init for VM configuration?\n\nCloud-Init allows automatic configuration of:\n- User accounts and passwords\n- SSH keys\n- Network settings (DHCP/Static)\n- DNS configuration\n\nYou can also configure these settings later in Proxmox UI.\n\nNote: Without Cloud-Init, the nocloud image will be used with console auto-login." --defaultno 18 68); then
CLOUD_INIT="yes"
echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}yes${CL}"
else
CLOUD_INIT="no"
echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}no${CL}"
fi
}
function default_settings() { function default_settings() {
VMID=$(get_valid_nextid) VMID=$(get_valid_nextid)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
@@ -227,6 +216,7 @@ function default_settings() {
VLAN="" VLAN=""
MTU="" MTU=""
START_VM="yes" START_VM="yes"
CLOUD_INIT="no"
METHOD="default" METHOD="default"
echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}" echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}" echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
@@ -240,7 +230,7 @@ function default_settings() {
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}" echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}" echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}" echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
select_cloud_init echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
echo -e "${CREATING}${BOLD}${DGN}Creating a Debian 13 VM using the above default settings${CL}" echo -e "${CREATING}${BOLD}${DGN}Creating a Debian 13 VM using the above default settings${CL}"
} }
@@ -410,7 +400,13 @@ function advanced_settings() {
exit-script exit-script
fi fi
select_cloud_init if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" --yesno "Configure the VM with Cloud-init?" --defaultno 10 58); then
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}yes${CL}"
CLOUD_INIT="yes"
else
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
CLOUD_INIT="no"
fi
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
@@ -477,17 +473,6 @@ else
fi fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location." msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}." msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
# ==============================================================================
# PREREQUISITES
# ==============================================================================
if ! command -v virt-customize &>/dev/null; then
msg_info "Installing libguestfs-tools"
apt-get update >/dev/null 2>&1
apt-get install -y libguestfs-tools >/dev/null 2>&1
msg_ok "Installed libguestfs-tools"
fi
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image" msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
if [ "$CLOUD_INIT" == "yes" ]; then if [ "$CLOUD_INIT" == "yes" ]; then
URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2 URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
@@ -501,50 +486,6 @@ echo -en "\e[1A\e[0K"
FILE=$(basename $URL) FILE=$(basename $URL)
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}" msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
# ==============================================================================
# IMAGE CUSTOMIZATION
# ==============================================================================
msg_info "Customizing ${FILE} image"
WORK_FILE=$(mktemp --suffix=.qcow2)
cp "$FILE" "$WORK_FILE"
# Set hostname
virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
# Prepare for unique machine-id on first boot
virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
# Disable systemd-firstboot to prevent interactive prompts blocking the console
virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true
# Pre-seed firstboot settings so it won't prompt even if triggered
virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true
if [ "$CLOUD_INIT" == "yes" ]; then
# Cloud-Init handles SSH and login
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
else
# Configure auto-login on serial console (ttyS0) and virtual console (tty1)
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
EOF' >/dev/null 2>&1 || true
fi
msg_ok "Customized image"
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}') STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in case $STORAGE_TYPE in
nfs | dir) nfs | dir)
@@ -560,11 +501,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"
@@ -576,7 +512,7 @@ msg_info "Creating a Debian 13 VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \ qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
if [ "$CLOUD_INIT" == "yes" ]; then if [ "$CLOUD_INIT" == "yes" ]; then
qm set $VMID \ qm set $VMID \
-efidisk0 ${DISK0_REF}${FORMAT} \ -efidisk0 ${DISK0_REF}${FORMAT} \
@@ -591,10 +527,6 @@ else
-boot order=scsi0 \ -boot order=scsi0 \
-serial0 socket >/dev/null -serial0 socket >/dev/null
fi fi
# Clean up work file
rm -f "$WORK_FILE"
DESCRIPTION=$( DESCRIPTION=$(
cat <<EOF cat <<EOF
<div align='center'> <div align='center'>

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -501,11 +501,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -45,7 +45,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }

View File

@@ -74,7 +74,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }

View File

@@ -71,7 +71,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -566,11 +566,6 @@ zfspool)
DISK_REF="" DISK_REF=""
DISK_IMPORT="-format raw" DISK_IMPORT="-format raw"
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}" DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}"

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -487,11 +487,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1,2}; do for i in {0,1,2}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -74,7 +74,7 @@ function error_handler() {
local exit_code="$?" local exit_code="$?"
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
post_update_to_api "failed" "$exit_code" post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid

View File

@@ -48,7 +48,7 @@ function error_handler() {
local exit_code="$?" local exit_code="$?"
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
post_update_to_api "failed" "$exit_code" post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
@@ -619,11 +619,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -71,7 +71,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -500,11 +500,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1,2}; do for i in {0,1,2}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -79,7 +79,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }
@@ -402,11 +402,6 @@ nfs | dir)
DISK_REF="$VMID/" DISK_REF="$VMID/"
DISK_IMPORT="-format qcow2" DISK_IMPORT="-format qcow2"
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -66,7 +66,7 @@ function error_handler() {
local exit_code="$?" local exit_code="$?"
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
post_update_to_api "failed" "$exit_code" post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
@@ -482,11 +482,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -69,7 +69,7 @@ function error_handler() {
local exit_code="$?" local exit_code="$?"
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
post_update_to_api "failed" "$exit_code" post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
@@ -484,11 +484,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -68,7 +68,7 @@ function error_handler() {
local exit_code="$?" local exit_code="$?"
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
post_update_to_api "failed" "$exit_code" post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
@@ -483,11 +483,6 @@ btrfs)
FORMAT=",efitype=4m" FORMAT=",efitype=4m"
THIN="" THIN=""
;; ;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac esac
for i in {0,1}; do for i in {0,1}; do
disk="DISK$i" disk="DISK$i"

View File

@@ -69,7 +69,7 @@ function error_handler() {
local line_number="$1" local line_number="$1"
local command="$2" local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}" post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n" echo -e "\n$error_message\n"
cleanup_vmid cleanup_vmid
} }