Compare commits


5 Commits

Author       SHA1        Message                                           Date
Tobias       4a9594aee7  Update frontend build command in install script   2026-02-12 08:24:23 +01:00
Tobias       b65b8a2ffa  Update frontend build command in wealthfolio.sh   2026-02-12 08:24:07 +01:00
Tobias       97cb1ac7e2  fix: remove duplicate                             2026-02-10 15:21:43 +01:00
Tobias       47b445bdb1  fix: remove duplicate                             2026-02-10 15:21:17 +01:00
CrazyWolf13  a2b27c9ff8  wealthfolio-v3                                    2026-02-10 14:42:16 +01:00
73 changed files with 1312 additions and 2220 deletions


@@ -89,15 +89,9 @@ jobs:
slug=$(jq -r '.slug // empty' "$json_file" 2>/dev/null)
[[ -z "$slug" ]] && continue
# Find corresponding script (install script or addon script)
install_script=""
if [[ -f "install/${slug}-install.sh" ]]; then
install_script="install/${slug}-install.sh"
elif [[ -f "tools/addon/${slug}.sh" ]]; then
install_script="tools/addon/${slug}.sh"
else
continue
fi
# Find corresponding install script
install_script="install/${slug}-install.sh"
[[ ! -f "$install_script" ]] && continue
# Look for fetch_and_deploy_gh_release calls
# Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"]
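The pattern named in the comment above can be matched with a one-line grep. The sketch below is illustrative only (not the workflow's actual extraction code); it assumes each call sits on a single line, uses "myapp" as a hypothetical slug, and requires GNU grep for -P:

install_script="install/myapp-install.sh"
# the second quoted argument is "owner/repo"; \K discards everything before it
grep -oP 'fetch_and_deploy_gh_release\s+"[^"]+"\s+"\K[^"]+' "$install_script" | head -n1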


@@ -401,125 +401,11 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-02-13
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874))
- Planka: add migrate step to update function [@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877))
- Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868))
- [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864))
- #### 🔧 Refactor
- chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872))
- Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850))
### 📡 API
- #### ✨ New Features
- error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875))
### 🌐 Website
- #### 📝 Script Information
- SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829))
## 2026-02-12
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844))
- Increased the Grafana container default disk size. [@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840))
- Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
- Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833))
- Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831))
- #### ✨ New Features
- Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842))
- Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810))
### 💾 Core
- #### ✨ New Features
- tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841))
- core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822))
- #### 🔧 Refactor
- error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845))
### 🧰 Tools
- #### 🐞 Bug Fixes
- Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837))
## 2026-02-11
### 🆕 New Scripts
- Draw.io ([#11788](https://github.com/community-scripts/ProxmoxVE/pull/11788))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- dispatcharr: include port 9191 in success-message [@MickLesk](https://github.com/MickLesk) ([#11808](https://github.com/community-scripts/ProxmoxVE/pull/11808))
- fix: make donetick 0.1.71 compatible [@tomfrenzel](https://github.com/tomfrenzel) ([#11804](https://github.com/community-scripts/ProxmoxVE/pull/11804))
- Kasm: Support new version URL format without hash suffix [@MickLesk](https://github.com/MickLesk) ([#11787](https://github.com/community-scripts/ProxmoxVE/pull/11787))
- LibreTranslate: Remove Torch [@tremor021](https://github.com/tremor021) ([#11783](https://github.com/community-scripts/ProxmoxVE/pull/11783))
- Snowshare: fix update script [@TuroYT](https://github.com/TuroYT) ([#11726](https://github.com/community-scripts/ProxmoxVE/pull/11726))
- #### ✨ New Features
- [Feature] OpenCloud: support PosixFS Collaborative Mode [@vhsdream](https://github.com/vhsdream) ([#11806](https://github.com/community-scripts/ProxmoxVE/pull/11806))
### 💾 Core
- #### 🔧 Refactor
- core: respect EDITOR variable for config editing [@ls-root](https://github.com/ls-root) ([#11693](https://github.com/community-scripts/ProxmoxVE/pull/11693))
### 📚 Documentation
- Fix formatting in kutt.json notes section [@tiagodenoronha](https://github.com/tiagodenoronha) ([#11774](https://github.com/community-scripts/ProxmoxVE/pull/11774))
## 2026-02-10
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Immich: Pin version to 2.5.6 [@vhsdream](https://github.com/vhsdream) ([#11775](https://github.com/community-scripts/ProxmoxVE/pull/11775))
- Libretranslate: Fix setuptools [@tremor021](https://github.com/tremor021) ([#11772](https://github.com/community-scripts/ProxmoxVE/pull/11772))
- Element Synapse: prevent systemd invoke failure during apt install [@MickLesk](https://github.com/MickLesk) ([#11758](https://github.com/community-scripts/ProxmoxVE/pull/11758))
- #### ✨ New Features
- Refactor: Slskd & Soularr [@vhsdream](https://github.com/vhsdream) ([#11674](https://github.com/community-scripts/ProxmoxVE/pull/11674))
### 🗑️ Deleted Scripts
- move paperless-exporter from LXC to addon ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737))
### 🧰 Tools
- #### 🐞 Bug Fixes
- feat: improve storage parsing & add guestname [@carlosmaroot](https://github.com/carlosmaroot) ([#11752](https://github.com/community-scripts/ProxmoxVE/pull/11752))
### 📂 Github
- Github-Version Workflow: include addon scripts in extraction [@MickLesk](https://github.com/MickLesk) ([#11757](https://github.com/community-scripts/ProxmoxVE/pull/11757))
- paperless-exporter ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737))
### 🌐 Website

api/.env.example (new file, 5 lines)

@@ -0,0 +1,5 @@
MONGO_USER=
MONGO_PASSWORD=
MONGO_IP=
MONGO_PORT=
MONGO_DATABASE=
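For illustration, a hypothetical filled-in copy (every value below is a placeholder); main.go further down composes these into a mongodb://user:password@ip:port connection URI:

MONGO_USER=apiuser
MONGO_PASSWORD=changeme
MONGO_IP=127.0.0.1
MONGO_PORT=27017
MONGO_DATABASE=telemetry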

api/go.mod (new file, 23 lines)

@@ -0,0 +1,23 @@
module proxmox-api
go 1.24.0
require (
github.com/gorilla/mux v1.8.1
github.com/joho/godotenv v1.5.1
github.com/rs/cors v1.11.1
go.mongodb.org/mongo-driver v1.17.2
)
require (
github.com/golang/snappy v0.0.4 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/text v0.31.0 // indirect
)

api/go.sum (new file, 56 lines)

@@ -0,0 +1,56 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

api/main.go (new file, 450 lines)

@@ -0,0 +1,450 @@
// Copyright (c) 2021-2026 community-scripts ORG
// Author: Michel Roegl-Brunner (michelroegl-brunner)
// License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
"github.com/rs/cors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var client *mongo.Client
var collection *mongo.Collection
func loadEnv() {
if err := godotenv.Load(); err != nil {
log.Fatal("Error loading .env file")
}
}
// DataModel represents a single document in MongoDB
type DataModel struct {
ID primitive.ObjectID `json:"id" bson:"_id,omitempty"`
CT_TYPE uint `json:"ct_type" bson:"ct_type"`
DISK_SIZE float32 `json:"disk_size" bson:"disk_size"`
CORE_COUNT uint `json:"core_count" bson:"core_count"`
RAM_SIZE uint `json:"ram_size" bson:"ram_size"`
OS_TYPE string `json:"os_type" bson:"os_type"`
OS_VERSION string `json:"os_version" bson:"os_version"`
DISABLEIP6 string `json:"disableip6" bson:"disableip6"`
NSAPP string `json:"nsapp" bson:"nsapp"`
METHOD string `json:"method" bson:"method"`
CreatedAt time.Time `json:"created_at" bson:"created_at"`
PVEVERSION string `json:"pve_version" bson:"pve_version"`
STATUS string `json:"status" bson:"status"`
RANDOM_ID string `json:"random_id" bson:"random_id"`
TYPE string `json:"type" bson:"type"`
ERROR string `json:"error" bson:"error"`
}
type StatusModel struct {
RANDOM_ID string `json:"random_id" bson:"random_id"`
ERROR string `json:"error" bson:"error"`
STATUS string `json:"status" bson:"status"`
}
type CountResponse struct {
TotalEntries int64 `json:"total_entries"`
StatusCount map[string]int64 `json:"status_count"`
NSAPPCount map[string]int64 `json:"nsapp_count"`
}
// ConnectDatabase initializes the MongoDB connection
func ConnectDatabase() {
loadEnv()
mongoURI := fmt.Sprintf("mongodb://%s:%s@%s:%s",
os.Getenv("MONGO_USER"),
os.Getenv("MONGO_PASSWORD"),
os.Getenv("MONGO_IP"),
os.Getenv("MONGO_PORT"))
database := os.Getenv("MONGO_DATABASE")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var err error
client, err = mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
if err != nil {
log.Fatal("Failed to connect to MongoDB!", err)
}
collection = client.Database(database).Collection("data_models")
fmt.Println("Connected to MongoDB on 10.10.10.18")
}
// UploadJSON handles API requests and stores data as a document in MongoDB
func UploadJSON(w http.ResponseWriter, r *http.Request) {
var input DataModel
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
input.CreatedAt = time.Now()
_, err := collection.InsertOne(context.Background(), input)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println("Received data:", input)
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(map[string]string{"message": "Data saved successfully"})
}
// UpdateStatus updates the status of a record based on RANDOM_ID
func UpdateStatus(w http.ResponseWriter, r *http.Request) {
var input StatusModel
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
filter := bson.M{"random_id": input.RANDOM_ID}
update := bson.M{"$set": bson.M{"status": input.STATUS, "error": input.ERROR}}
_, err := collection.UpdateOne(context.Background(), filter, update)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println("Updated data:", input)
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{"message": "Record updated successfully"})
}
// GetDataJSON fetches all data from MongoDB
func GetDataJSON(w http.ResponseWriter, r *http.Request) {
var records []DataModel
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.M{})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
records = append(records, record)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(records)
}
func GetPaginatedData(w http.ResponseWriter, r *http.Request) {
page, _ := strconv.Atoi(r.URL.Query().Get("page"))
limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
if page < 1 {
page = 1
}
if limit < 1 {
limit = 10
}
skip := (page - 1) * limit
var records []DataModel
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
options := options.Find().SetSkip(int64(skip)).SetLimit(int64(limit))
cursor, err := collection.Find(ctx, bson.M{}, options)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
records = append(records, record)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(records)
}
func GetSummary(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
totalCount, err := collection.CountDocuments(ctx, bson.M{})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
statusCount := make(map[string]int64)
nsappCount := make(map[string]int64)
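// Count documents grouped by status, then by nsapp, using two $group pipelines.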
pipeline := []bson.M{
{"$group": bson.M{"_id": "$status", "count": bson.M{"$sum": 1}}},
}
cursor, err := collection.Aggregate(ctx, pipeline)
if err == nil {
for cursor.Next(ctx) {
var result struct {
ID string `bson:"_id"`
Count int64 `bson:"count"`
}
if err := cursor.Decode(&result); err == nil {
statusCount[result.ID] = result.Count
}
}
}
pipeline = []bson.M{
{"$group": bson.M{"_id": "$nsapp", "count": bson.M{"$sum": 1}}},
}
cursor, err = collection.Aggregate(ctx, pipeline)
if err == nil {
for cursor.Next(ctx) {
var result struct {
ID string `bson:"_id"`
Count int64 `bson:"count"`
}
if err := cursor.Decode(&result); err == nil {
nsappCount[result.ID] = result.Count
}
}
}
response := CountResponse{
TotalEntries: totalCount,
StatusCount: statusCount,
NSAPPCount: nsappCount,
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}
func GetByNsapp(w http.ResponseWriter, r *http.Request) {
nsapp := r.URL.Query().Get("nsapp")
var records []DataModel
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.M{"nsapp": nsapp})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
records = append(records, record)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(records)
}
func GetByDateRange(w http.ResponseWriter, r *http.Request) {
startDate := r.URL.Query().Get("start_date")
endDate := r.URL.Query().Get("end_date")
if startDate == "" || endDate == "" {
http.Error(w, "Both start_date and end_date are required", http.StatusBadRequest)
return
}
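// Note: in a Go time layout the zone token is "-07:00"; the literal "+00:00"
// below is matched as plain text, so the parsed time carries no zone info and
// is treated as UTC (".999999" makes fractional seconds optional).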
start, err := time.Parse("2006-01-02T15:04:05.999999+00:00", startDate+"T00:00:00+00:00")
if err != nil {
http.Error(w, "Invalid start_date format", http.StatusBadRequest)
return
}
end, err := time.Parse("2006-01-02T15:04:05.999999+00:00", endDate+"T23:59:59+00:00")
if err != nil {
http.Error(w, "Invalid end_date format", http.StatusBadRequest)
return
}
var records []DataModel
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.M{
"created_at": bson.M{
"$gte": start,
"$lte": end,
},
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
records = append(records, record)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(records)
}
func GetByStatus(w http.ResponseWriter, r *http.Request) {
status := r.URL.Query().Get("status")
var records []DataModel
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.M{"status": status})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
records = append(records, record)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(records)
}
func GetByOS(w http.ResponseWriter, r *http.Request) {
osType := r.URL.Query().Get("os_type")
osVersion := r.URL.Query().Get("os_version")
var records []DataModel
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.M{"os_type": osType, "os_version": osVersion})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
records = append(records, record)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(records)
}
func GetErrors(w http.ResponseWriter, r *http.Request) {
errorCount := make(map[string]int)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cursor, err := collection.Find(ctx, bson.M{"error": bson.M{"$ne": ""}})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var record DataModel
if err := cursor.Decode(&record); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if record.ERROR != "" {
errorCount[record.ERROR]++
}
}
type ErrorCountResponse struct {
Error string `json:"error"`
Count int `json:"count"`
}
var errorCounts []ErrorCountResponse
for err, count := range errorCount {
errorCounts = append(errorCounts, ErrorCountResponse{
Error: err,
Count: count,
})
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(struct {
ErrorCounts []ErrorCountResponse `json:"error_counts"`
}{
ErrorCounts: errorCounts,
})
}
func main() {
ConnectDatabase()
router := mux.NewRouter()
router.HandleFunc("/upload", UploadJSON).Methods("POST")
router.HandleFunc("/upload/updatestatus", UpdateStatus).Methods("POST")
router.HandleFunc("/data/json", GetDataJSON).Methods("GET")
router.HandleFunc("/data/paginated", GetPaginatedData).Methods("GET")
router.HandleFunc("/data/summary", GetSummary).Methods("GET")
router.HandleFunc("/data/nsapp", GetByNsapp).Methods("GET")
router.HandleFunc("/data/date", GetByDateRange).Methods("GET")
router.HandleFunc("/data/status", GetByStatus).Methods("GET")
router.HandleFunc("/data/os", GetByOS).Methods("GET")
router.HandleFunc("/data/errors", GetErrors).Methods("GET")
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST"},
AllowedHeaders: []string{"Content-Type", "Authorization"},
AllowCredentials: true,
})
handler := c.Handler(router)
fmt.Println("Server running on port 8080")
log.Fatal(http.ListenAndServe(":8080", handler))
}
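Since the routes above are plain HTTP, the server can be smoke-tested with curl once it is running. A hypothetical session (port 8080 is hard-coded in main; all payload values are placeholders):

curl -s -X POST http://localhost:8080/upload \
  -H 'Content-Type: application/json' \
  -d '{"ct_type":1,"nsapp":"grafana","status":"installing","random_id":"abc123"}'
curl -s -X POST http://localhost:8080/upload/updatestatus \
  -H 'Content-Type: application/json' \
  -d '{"random_id":"abc123","status":"done","error":""}'
curl -s 'http://localhost:8080/data/paginated?page=1&limit=10'
curl -s 'http://localhost:8080/data/date?start_date=2026-02-01&end_date=2026-02-13'
curl -s http://localhost:8080/data/summary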


@@ -9,7 +9,7 @@ APP="Alpine-Grafana"
var_tags="${var_tags:-alpine;monitoring}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-2}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.23}"
var_unprivileged="${var_unprivileged:-1}"


@@ -28,7 +28,6 @@ function update_script() {
exit
fi
msg_info "Updating Deluge"
ensure_dependencies python3-setuptools
$STD apt update
$STD pip3 install deluge[all] --upgrade
msg_ok "Updated Deluge"


@@ -104,7 +104,7 @@ function update_script() {
cd /opt/dispatcharr
rm -rf .venv
$STD uv venv --clear
$STD uv sync
$STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
$STD uv pip install gunicorn gevent celery redis daphne
msg_ok "Updated Dispatcharr Backend"
@@ -144,4 +144,4 @@ description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9191${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"


@@ -35,15 +35,13 @@ function update_script() {
msg_ok "Stopped Service"
msg_info "Backing Up Configurations"
mv /opt/donetick/config/selfhosted.yaml /opt/donetick/donetick.db /opt
mv /opt/donetick/config/selfhosted.yml /opt/donetick/donetick.db /opt
msg_ok "Backed Up Configurations"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz"
msg_info "Restoring Configurations"
mv /opt/selfhosted.yaml /opt/donetick/config
grep -q 'http://localhost"$' /opt/donetick/config/selfhosted.yaml || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' /opt/donetick/config/selfhosted.yaml
grep -q 'capacitor://localhost' /opt/donetick/config/selfhosted.yaml || sed -i '/http:\/\/localhost"$/a\ - "capacitor://localhost"' /opt/donetick/config/selfhosted.yaml
mv /opt/selfhosted.yml /opt/donetick/config
mv /opt/donetick.db /opt/donetick
msg_ok "Restored Configurations"


@@ -1,58 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.drawio.com/
APP="DrawIO"
var_tags="${var_tags:-diagrams}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /var/lib/tomcat11/webapps/draw.war ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "drawio" "jgraph/drawio"; then
msg_info "Stopping service"
systemctl stop tomcat11
msg_ok "Service stopped"
msg_info "Updating Debian LXC"
$STD apt update
$STD apt upgrade -y
msg_ok "Updated Debian LXC"
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
msg_info "Starting service"
systemctl start tomcat11
msg_ok "Service started"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080/draw${CL}"


@@ -9,7 +9,7 @@ APP="EMQX"
var_tags="${var_tags:-mqtt}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-6}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"


@@ -1,7 +1,7 @@
#!/usr/bin/env bash
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVE/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Authors: MickLesk (CanbiZ) | Co-Author: remz1337
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 tteck
# Authors: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/
@@ -11,7 +11,7 @@ var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_version="${var_version:-11}"
var_unprivileged="${var_unprivileged:-0}"
var_gpu="${var_gpu:-yes}"
@@ -21,15 +21,15 @@ color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/systemd/system/frigate.service ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_error "To update Frigate, create a new container and transfer your configuration."
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/systemd/system/frigate.service ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_error "To update Frigate, create a new container and transfer your configuration."
exit
}
start


@@ -9,7 +9,7 @@ APP="Grafana"
var_tags="${var_tags:-monitoring;visualization}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-4}"
var_disk="${var_disk:-2}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"


@@ -1,6 +0,0 @@
____ ________
/ __ \_________ __ __/ _/ __ \
/ / / / ___/ __ `/ | /| / // // / / /
/ /_/ / / / /_/ /| |/ |/ // // /_/ /
/_____/_/ \__,_/ |__/|__/___/\____/


@@ -105,7 +105,7 @@ EOF
msg_ok "Image-processing libraries up to date"
fi
RELEASE="2.5.6"
RELEASE="2.5.5"
if check_for_gh_release "Immich" "immich-app/immich" "${RELEASE}"; then
if [[ $(cat ~/.immich) > "2.5.1" ]]; then
msg_info "Enabling Maintenance Mode"


@@ -46,7 +46,7 @@ function update_script() {
msg_info "Restoring configuration & data"
mv /opt/app.env /opt/jotty/.env
[[ -d /opt/data ]] && mv /opt/data /opt/jotty/data
[[ -d /opt/jotty/config ]] && cp -a /opt/config/* /opt/jotty/config && rm -rf /opt/config
[[ -d /opt/jotty/config ]] && mv /opt/config/* /opt/jotty/config
msg_ok "Restored configuration & data"
msg_info "Starting Service"


@@ -34,19 +34,10 @@ function update_script() {
CURRENT_VERSION=$(readlink -f /opt/kasm/current | awk -F'/' '{print $4}')
KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1)
if [[ -z "$KASM_URL" ]]; then
SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1)
if [[ -n "$SERVICE_IMAGE_URL" ]]; then
KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz"
fi
else
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
fi
if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
msg_error "Unable to detect latest Kasm release URL."
exit 1
fi
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
msg_info "Checked for new version"
msg_info "Removing outdated docker-compose plugin"


@@ -30,7 +30,7 @@ function update_script() {
fi
RELEASE="v5.0.2"
if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}"; then
if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then
msg_info "Stopping services"
systemctl stop opencloud opencloud-wopi
msg_ok "Stopped services"
@@ -38,21 +38,9 @@ function update_script() {
msg_info "Updating packages"
$STD apt-get update
$STD apt-get dist-upgrade -y
ensure_dependencies "inotify-tools"
msg_ok "Updated packages"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "OpenCloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"
if ! grep -q 'POSIX_WATCH' /etc/opencloud/opencloud.env; then
sed -i '/^## External/i ## Uncomment below to enable PosixFS Collaborative Mode\
## Increase inotify watch/instance limits on your PVE host:\
### sysctl -w fs.inotify.max_user_watches=1048576\
### sysctl -w fs.inotify.max_user_instances=1024\
# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true\
# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait\
# STORAGE_USERS_POSIX_WATCH_FS=true\
# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>' /etc/opencloud/opencloud.env
fi
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"
msg_info "Starting services"
systemctl start opencloud opencloud-wopi


@@ -44,7 +44,7 @@ function update_script() {
msg_info "Installing uv-based Open-WebUI"
PYTHON_VERSION="3.12" setup_uv
$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
$STD uv tool install --python 3.12 open-webui[all]
msg_ok "Installed uv-based Open-WebUI"
msg_info "Restoring data"
@@ -126,7 +126,7 @@ EOF
msg_info "Updating Open WebUI via uv"
PYTHON_VERSION="3.12" setup_uv
$STD uv tool install --force --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
$STD uv tool upgrade --python 3.12 open-webui[all]
systemctl restart open-webui
msg_ok "Updated Open WebUI"
msg_ok "Updated successfully!"


@@ -51,7 +51,7 @@ function update_script() {
$STD npm run db:generate
$STD npm run build
$STD npm run build:cli
$STD npm run db:push
$STD npm run db:sqlite:push
cp -R .next/standalone ./
chmod +x ./dist/cli.mjs
cp server/db/names.json ./dist/names.json


@@ -61,12 +61,6 @@ function update_script() {
rm -rf "$BK"
msg_ok "Restored data"
msg_ok "Migrate Database"
cd /opt/planka
$STD npm run db:upgrade
$STD npm run db:migrate
msg_ok "Migrated Database"
msg_info "Starting Service"
systemctl start planka
msg_ok "Started Service"


@@ -28,55 +28,16 @@ function update_script() {
exit
fi
if check_for_gh_release "Radicale" "Kozea/Radicale"; then
msg_info "Stopping service"
systemctl stop radicale
msg_ok "Stopped service"
msg_info "Updating ${APP}"
$STD python3 -m venv /opt/radicale
source /opt/radicale/bin/activate
$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
msg_ok "Updated ${APP}"
msg_info "Backing up users file"
cp /opt/radicale/users /opt/radicale_users_backup
msg_ok "Backed up users file"
PYTHON_VERSION="3.13" setup_uv
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
msg_info "Restoring users file"
rm -f /opt/radicale/users
mv /opt/radicale_users_backup /opt/radicale/users
msg_ok "Restored users file"
if grep -q 'start.sh' /etc/systemd/system/radicale.service; then
sed -i -e '/^Description/i[Unit]' \
-e '\|^ExecStart|iWorkingDirectory=/opt/radicale' \
-e 's|^ExecStart=.*|ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config|' /etc/systemd/system/radicale.service
systemctl daemon-reload
fi
if [[ ! -f /etc/radicale/config ]]; then
msg_info "Migrating to config file (/etc/radicale/config)"
mkdir -p /etc/radicale
cat <<EOF >/etc/radicale/config
[server]
hosts = 0.0.0.0:5232
[auth]
type = htpasswd
htpasswd_filename = /opt/radicale/users
htpasswd_encryption = sha512
[storage]
type = multifilesystem
filesystem_folder = /var/lib/radicale/collections
[web]
type = internal
EOF
msg_ok "Migrated to config (/etc/radicale/config)"
fi
msg_info "Starting service"
systemctl start radicale
msg_ok "Started service"
msg_ok "Updated Successfully!"
fi
msg_info "Starting Service"
systemctl enable -q --now radicale
msg_ok "Started Service"
msg_ok "Updated successfully!"
exit
}
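After the sed surgery above, the unit file roughly takes this shape (a sketch; the Description line and any other directives come from the pre-existing unit and are left unchanged):

# /etc/systemd/system/radicale.service, approximate result:
#   [Unit]
#   Description=<unchanged>
#   ...
#   WorkingDirectory=/opt/radicale
#   ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config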


@@ -3,7 +3,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/slskd/slskd, https://github.com/mrusse/soularr
# Source: https://github.com/slskd/slskd, https://soularr.net
APP="slskd"
var_tags="${var_tags:-arr;p2p}"
@@ -24,65 +24,50 @@ function update_script() {
check_container_storage
check_container_resources
if [[ ! -d /opt/slskd ]]; then
msg_error "No Slskd Installation Found!"
if [[ ! -d /opt/slskd ]] || [[ ! -d /opt/soularr ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "Slskd" "slskd/slskd"; then
msg_info "Stopping Service(s)"
systemctl stop slskd
[[ -f /etc/systemd/system/soularr.service ]] && systemctl stop soularr.timer soularr.service
msg_ok "Stopped Service(s)"
RELEASE=$(curl -s https://api.github.com/repos/slskd/slskd/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
msg_info "Stopping Service"
systemctl stop slskd soularr.timer soularr.service
msg_info "Stopped Service"
msg_info "Backing up config"
cp /opt/slskd/config/slskd.yml /opt/slskd.yml.bak
msg_ok "Backed up config"
msg_info "Updating $APP to v${RELEASE}"
tmp_file=$(mktemp)
curl -fsSL "https://github.com/slskd/slskd/releases/download/${RELEASE}/slskd-${RELEASE}-linux-x64.zip" -o $tmp_file
$STD unzip -oj $tmp_file slskd -d /opt/${APP}
echo "${RELEASE}" >/opt/${APP}_version.txt
msg_ok "Updated $APP to v${RELEASE}"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Slskd" "slskd/slskd" "prebuild" "latest" "/opt/slskd" "slskd-*-linux-x64.zip"
msg_info "Restoring config"
mv /opt/slskd.yml.bak /opt/slskd/config/slskd.yml
msg_ok "Restored config"
msg_info "Starting Service(s)"
msg_info "Starting Service"
systemctl start slskd
[[ -f /etc/systemd/system/soularr.service ]] && systemctl start soularr.timer
msg_ok "Started Service(s)"
msg_ok "Updated Slskd successfully!"
msg_ok "Started Service"
rm -rf $tmp_file
else
msg_ok "No ${APP} update required. ${APP} is already at v${RELEASE}"
fi
[[ -d /opt/soularr ]] && if check_for_gh_release "Soularr" "mrusse/soularr"; then
if systemctl is-active soularr.timer >/dev/null; then
msg_info "Stopping Timer and Service"
systemctl stop soularr.timer soularr.service
msg_ok "Stopped Timer and Service"
fi
msg_info "Updating Soularr"
cp /opt/soularr/config.ini /opt/config.ini.bak
cp /opt/soularr/run.sh /opt/run.sh.bak
cd /tmp
rm -rf /opt/soularr
curl -fsSL -o main.zip https://github.com/mrusse/soularr/archive/refs/heads/main.zip
$STD unzip main.zip
mv soularr-main /opt/soularr
cd /opt/soularr
$STD pip install -r requirements.txt
mv /opt/config.ini.bak /opt/soularr/config.ini
mv /opt/run.sh.bak /opt/soularr/run.sh
rm -rf /tmp/main.zip
msg_ok "Updated soularr"
msg_info "Backing up Soularr config"
cp /opt/soularr/config.ini /opt/soularr_config.ini.bak
cp /opt/soularr/run.sh /opt/soularr_run.sh.bak
msg_ok "Backed up Soularr config"
PYTHON_VERSION="3.11" setup_uv
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Soularr" "mrusse/soularr" "tarball" "latest" "/opt/soularr"
msg_info "Updating Soularr"
cd /opt/soularr
$STD uv venv -c venv
$STD source venv/bin/activate
$STD uv pip install -r requirements.txt
deactivate
msg_ok "Updated Soularr"
msg_info "Restoring Soularr config"
mv /opt/soularr_config.ini.bak /opt/soularr/config.ini
mv /opt/soularr_run.sh.bak /opt/soularr/run.sh
msg_ok "Restored Soularr config"
msg_info "Starting Soularr Timer"
systemctl restart soularr.timer
msg_ok "Started Soularr Timer"
msg_ok "Updated Soularr successfully!"
fi
msg_info "Starting soularr timer"
systemctl start soularr.timer
msg_ok "Started soularr timer"
exit
}
start


@@ -33,15 +33,7 @@ function update_script() {
systemctl stop snowshare
msg_ok "Stopped Service"
msg_info "Backing up uploads"
[ -d /opt/snowshare/uploads ] && cp -a /opt/snowshare/uploads /opt/.snowshare_uploads_backup
msg_ok "Uploads backed up"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare" "tarball"
msg_info "Restoring uploads"
[ -d /opt/.snowshare_uploads_backup ] && rm -rf /opt/snowshare/uploads && cp -a /opt/.snowshare_uploads_backup /opt/snowshare/uploads
msg_ok "Uploads restored"
fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare" "tarball"
msg_info "Updating Snowshare"
cd /opt/snowshare


@@ -43,16 +43,15 @@ function update_script() {
msg_info "Building Frontend (patience)"
cd /opt/wealthfolio
export BUILD_TARGET=web
$STD pnpm install --frozen-lockfile
$STD pnpm tsc
$STD pnpm vite build
$STD pnpm --filter frontend... build
msg_ok "Built Frontend"
msg_info "Building Backend (patience)"
cd /opt/wealthfolio/src-server
source ~/.cargo/env
$STD cargo build --release --manifest-path Cargo.toml
cp /opt/wealthfolio/src-server/target/release/wealthfolio-server /usr/local/bin/wealthfolio-server
$STD cargo build --release --manifest-path apps/server/Cargo.toml
cp /opt/wealthfolio/target/release/wealthfolio-server /usr/local/bin/wealthfolio-server
chmod +x /usr/local/bin/wealthfolio-server
msg_ok "Built Backend"
@@ -63,7 +62,7 @@ function update_script() {
msg_ok "Restored Data"
msg_info "Cleaning Up"
rm -rf /opt/wealthfolio/src-server/target
rm -rf /opt/wealthfolio/target
rm -rf /root/.cargo/registry
rm -rf /opt/wealthfolio/node_modules
msg_ok "Cleaned Up"


@@ -1,35 +0,0 @@
{
"name": "Draw.IO",
"slug": "drawio",
"categories": [
12
],
"date_created": "2026-02-11",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8080,
"documentation": "https://www.drawio.com/doc/",
"website": "https://www.drawio.com/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/draw-io.webp",
"config_path": "",
"description": "draw.io is a configurable diagramming and whiteboarding application, jointly owned and developed by draw.io Ltd (previously named JGraph) and draw.io AG.",
"install_methods": [
{
"type": "default",
"script": "ct/drawio.sh",
"resources": {
"cpu": 1,
"ram": 2048,
"hdd": 4,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}


@@ -21,7 +21,7 @@
"resources": {
"cpu": 2,
"ram": 1024,
"hdd": 6,
"hdd": 4,
"os": "debian",
"version": "13"
}


@@ -1,44 +0,0 @@
{
"name": "Frigate",
"slug": "frigate",
"categories": [
15
],
"date_created": "2026-02-13",
"type": "ct",
"updateable": false,
"privileged": false,
"config_path": "/config/config.yml",
"interface_port": 5000,
"documentation": "https://frigate.io/",
"website": "https://frigate.io/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/frigate-light.webp",
"description": "Frigate is a complete and local NVR (Network Video Recorder) with realtime AI object detection for CCTV cameras.",
"install_methods": [
{
"type": "default",
"script": "ct/frigate.sh",
"resources": {
"cpu": 4,
"ram": 4096,
"hdd": 20,
"os": "Debian",
"version": "12"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "SemanticSearch is not pre-installed due to high resource requirements (8+ cores, 16-24GB RAM, GPU recommended). Manual configuration required if needed.",
"type": "info"
},
{
"text": "OpenVino detector may fail on older CPUs (pre-Haswell/AVX2). If you encounter 'Illegal instruction' errors, consider using alternative detectors.",
"type": "warning"
}
]
}


@@ -0,0 +1,44 @@
{
"name": "Frigate",
"slug": "frigate",
"categories": [
15
],
"date_created": "2024-05-02",
"type": "ct",
"updateable": false,
"privileged": true,
"interface_port": 5000,
"documentation": "https://docs.frigate.video/",
"website": "https://frigate.video/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/frigate.webp",
"config_path": "",
"description": "Frigate is an open source NVR built around real-time AI object detection. All processing is performed locally on your own hardware, and your camera feeds never leave your home.",
"install_methods": [
{
"type": "default",
"script": "ct/frigate.sh",
"resources": {
"cpu": 4,
"ram": 4096,
"hdd": 20,
"os": "debian",
"version": "11"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "Discussions (explore more advanced methods): `https://github.com/tteck/Proxmox/discussions/2711`",
"type": "info"
},
{
"text": "go2rtc Interface port:`1984`",
"type": "info"
}
]
}


@@ -1,5 +1,5 @@
{
"generated": "2026-02-13T12:11:36Z",
"generated": "2026-02-10T12:19:02Z",
"versions": [
{
"slug": "2fauth",
@@ -15,13 +15,6 @@
"pinned": false,
"date": "2025-12-08T14:34:55Z"
},
{
"slug": "adguardhome-sync",
"repo": "bakito/adguardhome-sync",
"version": "v0.8.2",
"pinned": false,
"date": "2025-10-24T17:13:47Z"
},
{
"slug": "adventurelog",
"repo": "seanmorley15/adventurelog",
@@ -193,9 +186,9 @@
{
"slug": "cleanuparr",
"repo": "Cleanuparr/Cleanuparr",
"version": "v2.6.1",
"version": "v2.5.1",
"pinned": false,
"date": "2026-02-13T10:00:19Z"
"date": "2026-01-11T00:46:17Z"
},
{
"slug": "cloudreve",
@@ -207,9 +200,9 @@
{
"slug": "comfyui",
"repo": "comfyanonymous/ComfyUI",
"version": "v0.13.0",
"version": "v0.12.3",
"pinned": false,
"date": "2026-02-10T20:27:38Z"
"date": "2026-02-05T07:04:07Z"
},
{
"slug": "commafeed",
@@ -242,16 +235,16 @@
{
"slug": "cronicle",
"repo": "jhuckaby/Cronicle",
"version": "v0.9.106",
"version": "v0.9.105",
"pinned": false,
"date": "2026-02-11T17:11:46Z"
"date": "2026-02-05T18:16:11Z"
},
{
"slug": "cryptpad",
"repo": "cryptpad/cryptpad",
"version": "2026.2.0",
"version": "2025.9.0",
"pinned": false,
"date": "2026-02-11T15:39:05Z"
"date": "2025-10-22T10:06:29Z"
},
{
"slug": "dawarich",
@@ -270,9 +263,9 @@
{
"slug": "dispatcharr",
"repo": "Dispatcharr/Dispatcharr",
"version": "v0.19.0",
"version": "v0.18.1",
"pinned": false,
"date": "2026-02-10T21:18:10Z"
"date": "2026-01-27T17:09:11Z"
},
{
"slug": "docmost",
@@ -284,30 +277,23 @@
{
"slug": "domain-locker",
"repo": "Lissy93/domain-locker",
"version": "v0.1.3",
"version": "v0.1.2",
"pinned": false,
"date": "2026-02-11T10:03:32Z"
"date": "2025-11-14T22:08:23Z"
},
{
"slug": "domain-monitor",
"repo": "Hosteroid/domain-monitor",
"version": "v1.1.3",
"version": "v1.1.2",
"pinned": false,
"date": "2026-02-11T15:48:18Z"
"date": "2026-02-09T06:29:34Z"
},
{
"slug": "donetick",
"repo": "donetick/donetick",
"version": "v0.1.73",
"version": "v0.1.64",
"pinned": false,
"date": "2026-02-12T23:42:30Z"
},
{
"slug": "drawio",
"repo": "jgraph/drawio",
"version": "v29.3.6",
"pinned": false,
"date": "2026-01-28T18:25:02Z"
"date": "2025-10-03T05:18:24Z"
},
{
"slug": "duplicati",
@@ -333,9 +319,9 @@
{
"slug": "endurain",
"repo": "endurain-project/endurain",
"version": "v0.17.4",
"version": "v0.17.3",
"pinned": false,
"date": "2026-02-11T04:54:22Z"
"date": "2026-01-23T22:02:05Z"
},
{
"slug": "ersatztv",
@@ -403,9 +389,9 @@
{
"slug": "ghostfolio",
"repo": "ghostfolio/ghostfolio",
"version": "2.238.0",
"version": "2.237.0",
"pinned": false,
"date": "2026-02-12T18:28:55Z"
"date": "2026-02-08T13:59:53Z"
},
{
"slug": "gitea",
@@ -543,16 +529,9 @@
{
"slug": "huntarr",
"repo": "plexguide/Huntarr.io",
"version": "9.2.4.1",
"version": "9.2.3",
"pinned": false,
"date": "2026-02-12T22:17:47Z"
},
{
"slug": "immich-public-proxy",
"repo": "alangrainger/immich-public-proxy",
"version": "v1.15.1",
"pinned": false,
"date": "2026-01-26T08:04:27Z"
"date": "2026-02-07T04:44:20Z"
},
{
"slug": "inspircd",
@@ -571,23 +550,16 @@
{
"slug": "invoiceninja",
"repo": "invoiceninja/invoiceninja",
"version": "v5.12.59",
"version": "v5.12.55",
"pinned": false,
"date": "2026-02-13T02:26:13Z"
"date": "2026-02-05T01:06:15Z"
},
{
"slug": "jackett",
"repo": "Jackett/Jackett",
"version": "v0.24.1103",
"version": "v0.24.1089",
"pinned": false,
"date": "2026-02-13T05:53:23Z"
},
{
"slug": "jellystat",
"repo": "CyferShepard/Jellystat",
"version": "V1.1.8",
"pinned": false,
"date": "2026-02-08T08:15:00Z"
"date": "2026-02-10T05:55:59Z"
},
{
"slug": "joplin-server",
@@ -599,9 +571,9 @@
{
"slug": "jotty",
"repo": "fccview/jotty",
"version": "1.20.0",
"version": "1.19.1",
"pinned": false,
"date": "2026-02-12T09:23:30Z"
"date": "2026-01-26T21:30:39Z"
},
{
"slug": "kapowarr",
@@ -711,9 +683,9 @@
{
"slug": "libretranslate",
"repo": "LibreTranslate/LibreTranslate",
"version": "v1.9.0",
"version": "v1.8.4",
"pinned": false,
"date": "2026-02-10T19:05:48Z"
"date": "2026-02-02T17:45:16Z"
},
{
"slug": "lidarr",
@@ -746,9 +718,9 @@
{
"slug": "lubelogger",
"repo": "hargata/lubelog",
"version": "v1.6.0",
"version": "v1.5.9",
"pinned": false,
"date": "2026-02-10T20:16:32Z"
"date": "2026-02-09T17:36:13Z"
},
{
"slug": "mafl",
@@ -767,9 +739,9 @@
{
"slug": "mail-archiver",
"repo": "s1t5/mail-archiver",
"version": "2602.1",
"version": "2601.3",
"pinned": false,
"date": "2026-02-11T06:23:11Z"
"date": "2026-01-25T12:52:24Z"
},
{
"slug": "managemydamnlife",
@@ -795,9 +767,9 @@
{
"slug": "mediamanager",
"repo": "maxdorninger/MediaManager",
"version": "v1.12.3",
"version": "v1.12.2",
"pinned": false,
"date": "2026-02-11T16:45:40Z"
"date": "2026-02-08T19:18:29Z"
},
{
"slug": "mediamtx",
@@ -823,9 +795,9 @@
{
"slug": "metube",
"repo": "alexta69/metube",
"version": "2026.02.12",
"version": "2026.02.08",
"pinned": false,
"date": "2026-02-12T21:05:49Z"
"date": "2026-02-08T17:01:37Z"
},
{
"slug": "miniflux",
@@ -865,9 +837,9 @@
{
"slug": "navidrome",
"repo": "navidrome/navidrome",
"version": "v0.60.3",
"version": "v0.60.2",
"pinned": false,
"date": "2026-02-10T23:55:04Z"
"date": "2026-02-07T19:42:33Z"
},
{
"slug": "netbox",
@@ -876,13 +848,6 @@
"pinned": false,
"date": "2026-02-03T13:54:26Z"
},
{
"slug": "nextcloud-exporter",
"repo": "xperimental/nextcloud-exporter",
"version": "v0.9.0",
"pinned": false,
"date": "2025-10-12T20:03:10Z"
},
{
"slug": "nginx-ui",
"repo": "0xJacky/nginx-ui",
@@ -998,9 +963,9 @@
{
"slug": "pangolin",
"repo": "fosrl/pangolin",
"version": "1.15.4",
"version": "1.15.2",
"pinned": false,
"date": "2026-02-13T00:54:02Z"
"date": "2026-02-05T19:23:58Z"
},
{
"slug": "paperless-ai",
@@ -1026,9 +991,9 @@
{
"slug": "patchmon",
"repo": "PatchMon/PatchMon",
"version": "v1.4.0",
"version": "v1.3.7",
"pinned": false,
"date": "2026-02-13T10:39:03Z"
"date": "2025-12-25T11:08:14Z"
},
{
"slug": "paymenter",
@@ -1072,19 +1037,12 @@
"pinned": false,
"date": "2025-12-01T05:07:31Z"
},
{
"slug": "pihole-exporter",
"repo": "eko/pihole-exporter",
"version": "v1.2.0",
"pinned": false,
"date": "2025-07-29T19:15:37Z"
},
{
"slug": "planka",
"repo": "plankanban/planka",
"version": "v2.0.0",
"version": "v2.0.0-rc.4",
"pinned": false,
"date": "2026-02-11T13:50:10Z"
"date": "2025-09-04T12:41:17Z"
},
{
"slug": "plant-it",
@@ -1131,9 +1089,9 @@
{
"slug": "prometheus-alertmanager",
"repo": "prometheus/alertmanager",
"version": "v0.31.1",
"version": "v0.31.0",
"pinned": false,
"date": "2026-02-11T21:28:26Z"
"date": "2026-02-02T13:34:15Z"
},
{
"slug": "prometheus-blackbox-exporter",
@@ -1142,13 +1100,6 @@
"pinned": false,
"date": "2025-12-06T13:32:18Z"
},
{
"slug": "prometheus-paperless-ngx-exporter",
"repo": "hansmi/prometheus-paperless-exporter",
"version": "v0.0.9",
"pinned": false,
"date": "2025-12-08T20:37:45Z"
},
{
"slug": "prowlarr",
"repo": "Prowlarr/Prowlarr",
@@ -1173,9 +1124,9 @@
{
"slug": "pulse",
"repo": "rcourtman/Pulse",
"version": "v5.1.9",
"version": "v5.1.7",
"pinned": false,
"date": "2026-02-11T15:34:40Z"
"date": "2026-02-10T09:59:55Z"
},
{
"slug": "pve-scripts-local",
@@ -1191,13 +1142,6 @@
"pinned": false,
"date": "2025-11-19T23:54:34Z"
},
{
"slug": "qbittorrent-exporter",
"repo": "martabal/qbittorrent-exporter",
"version": "v1.13.2",
"pinned": false,
"date": "2025-12-13T22:59:03Z"
},
{
"slug": "qdrant",
"repo": "qdrant/qdrant",
@@ -1219,13 +1163,6 @@
"pinned": false,
"date": "2025-11-16T22:39:01Z"
},
{
"slug": "radicale",
"repo": "Kozea/Radicale",
"version": "v3.6.0",
"pinned": false,
"date": "2026-01-10T06:56:46Z"
},
{
"slug": "rclone",
"repo": "rclone/rclone",
@@ -1236,9 +1173,9 @@
{
"slug": "rdtclient",
"repo": "rogerfar/rdt-client",
"version": "v2.0.120",
"version": "v2.0.119",
"pinned": false,
"date": "2026-02-12T02:53:51Z"
"date": "2025-10-13T23:15:11Z"
},
{
"slug": "reactive-resume",
@@ -1292,16 +1229,16 @@
{
"slug": "scanopy",
"repo": "scanopy/scanopy",
"version": "v0.14.4",
"version": "v0.14.3",
"pinned": false,
"date": "2026-02-10T03:57:28Z"
"date": "2026-02-04T01:41:01Z"
},
{
"slug": "scraparr",
"repo": "thecfu/scraparr",
"version": "v3.0.3",
"version": "v2.2.5",
"pinned": false,
"date": "2026-02-12T14:20:56Z"
"date": "2025-10-07T12:34:31Z"
},
{
"slug": "seelf",
@@ -1338,13 +1275,6 @@
"pinned": false,
"date": "2026-01-16T12:08:28Z"
},
{
"slug": "slskd",
"repo": "slskd/slskd",
"version": "0.24.3",
"pinned": false,
"date": "2026-01-15T14:40:15Z"
},
{
"slug": "snipeit",
"repo": "grokability/snipe-it",
@@ -1355,9 +1285,9 @@
{
"slug": "snowshare",
"repo": "TuroYT/snowshare",
"version": "v1.3.5",
"version": "v1.3.3",
"pinned": false,
"date": "2026-02-11T10:24:51Z"
"date": "2026-02-09T10:52:12Z"
},
{
"slug": "sonarr",
@@ -1390,9 +1320,9 @@
{
"slug": "stirling-pdf",
"repo": "Stirling-Tools/Stirling-PDF",
"version": "v2.4.6",
"version": "v2.4.5",
"pinned": false,
"date": "2026-02-12T00:01:19Z"
"date": "2026-02-06T23:12:20Z"
},
{
"slug": "streamlink-webui",
@@ -1439,9 +1369,9 @@
{
"slug": "termix",
"repo": "Termix-SSH/Termix",
"version": "release-1.11.1-tag",
"version": "release-1.11.0-tag",
"pinned": false,
"date": "2026-02-13T04:49:16Z"
"date": "2026-01-25T02:09:52Z"
},
{
"slug": "the-lounge",
@@ -1467,9 +1397,9 @@
{
"slug": "tianji",
"repo": "msgbyte/tianji",
"version": "v1.31.12",
"version": "v1.31.10",
"pinned": false,
"date": "2026-02-12T19:06:14Z"
"date": "2026-02-04T17:21:04Z"
},
{
"slug": "traccar",
@@ -1481,9 +1411,9 @@
{
"slug": "tracearr",
"repo": "connorgallopo/Tracearr",
"version": "v1.4.17",
"version": "v1.4.12",
"pinned": false,
"date": "2026-02-11T01:33:21Z"
"date": "2026-01-28T23:29:37Z"
},
{
"slug": "tracktor",
@@ -1495,9 +1425,9 @@
{
"slug": "traefik",
"repo": "traefik/traefik",
"version": "v3.6.8",
"version": "v3.6.7",
"pinned": false,
"date": "2026-02-11T16:44:37Z"
"date": "2026-01-14T14:11:45Z"
},
{
"slug": "trilium",
@@ -1509,9 +1439,9 @@
{
"slug": "trip",
"repo": "itskovacs/TRIP",
"version": "1.40.0",
"version": "1.39.0",
"pinned": false,
"date": "2026-02-10T20:12:53Z"
"date": "2026-02-07T16:59:51Z"
},
{
"slug": "tududi",
@@ -1558,9 +1488,9 @@
{
"slug": "upsnap",
"repo": "seriousm4x/UpSnap",
"version": "5.2.8",
"version": "5.2.7",
"pinned": false,
"date": "2026-02-13T00:02:37Z"
"date": "2026-01-07T23:48:00Z"
},
{
"slug": "uptimekuma",
@@ -1572,9 +1502,9 @@
{
"slug": "vaultwarden",
"repo": "dani-garcia/vaultwarden",
"version": "1.35.3",
"version": "1.35.2",
"pinned": false,
"date": "2026-02-10T20:37:03Z"
"date": "2026-01-09T18:37:04Z"
},
{
"slug": "victoriametrics",
@@ -1600,9 +1530,9 @@
{
"slug": "wallos",
"repo": "ellite/Wallos",
"version": "v4.6.1",
"version": "v4.6.0",
"pinned": false,
"date": "2026-02-10T21:06:46Z"
"date": "2025-12-20T15:57:51Z"
},
{
"slug": "wanderer",
@@ -1635,9 +1565,9 @@
{
"slug": "wavelog",
"repo": "wavelog/wavelog",
"version": "2.3",
"version": "2.2.2",
"pinned": false,
"date": "2026-02-11T15:46:40Z"
"date": "2025-12-31T16:53:34Z"
},
{
"slug": "wealthfolio",
@@ -1663,9 +1593,9 @@
{
"slug": "wikijs",
"repo": "requarks/wiki",
"version": "v2.5.312",
"version": "v2.5.311",
"pinned": false,
"date": "2026-02-12T02:45:22Z"
"date": "2026-01-08T09:50:00Z"
},
{
"slug": "wishlist",
@@ -1712,9 +1642,9 @@
{
"slug": "zipline",
"repo": "diced/zipline",
"version": "v4.4.2",
"version": "v4.4.1",
"pinned": false,
"date": "2026-02-11T04:58:54Z"
"date": "2026-01-20T01:29:01Z"
},
{
"slug": "zitadel",

View File

@@ -21,7 +21,7 @@
"resources": {
"cpu": 1,
"ram": 512,
"hdd": 4,
"hdd": 2,
"os": "debian",
"version": "13"
}
@@ -32,7 +32,7 @@
"resources": {
"cpu": 1,
"ram": 256,
"hdd": 2,
"hdd": 1,
"os": "alpine",
"version": "3.23"
}

View File

@@ -33,7 +33,7 @@
},
"notes": [
{
"text": "Kutt needs so be served with an SSL certificate for its login to work. During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse proxy to port 3000).",
"text": "Kutt needs so be served with an SSL certificate for its login to work. During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse porxy to port 3000).",
"type": "info"
}
]
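For reference, a minimal sketch of the SSL-terminating front end this note describes, for users bringing their own Caddy instance (the hostname is a placeholder; only the backend port 3000 comes from the note itself):

kutt.example.com {
    reverse_proxy 127.0.0.1:3000
}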

View File

@@ -12,7 +12,7 @@
"documentation": "https://radicale.org/master.html#documentation-1",
"website": "https://radicale.org/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp",
"config_path": "/etc/radicale/config",
"config_path": "/etc/radicale/config or ~/.config/radicale/config",
"description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)",
"install_methods": [
{

View File

@@ -1,5 +1,5 @@
{
"name": "Slskd",
"name": "slskd",
"slug": "slskd",
"categories": [
11
@@ -35,6 +35,10 @@
{
"text": "See /opt/slskd/config/slskd.yml to add your Soulseek credentials",
"type": "info"
},
{
"text": "This LXC includes Soularr; it needs to be configured (/opt/soularr/config.ini) before it will work",
"type": "info"
}
]
}
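The Soularr note above points at /opt/soularr/config.ini; a minimal sketch of the section the install script rewires (the [Slskd] section and host_url key appear in the sed calls further down this diff; the api_key name and port 5030 are assumptions based on slskd's defaults):

[Slskd]
api_key = <API key from /opt/slskd/config/slskd.yml>
host_url = http://localhost:5030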

View File

@@ -32,10 +32,6 @@
"password": null
},
"notes": [
{
"text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.",
"type": "warning"
},
{
"text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.",
"type": "info"

View File

@@ -16,8 +16,7 @@ update_os
msg_info "Installing Dependencies"
$STD apt install -y \
python3-pip \
python3-libtorrent \
python3-setuptools
python3-libtorrent
msg_ok "Installed Dependencies"
msg_info "Installing Deluge"

View File

@@ -37,7 +37,7 @@ fetch_and_deploy_gh_release "dispatcharr" "Dispatcharr/Dispatcharr" "tarball"
msg_info "Installing Python Dependencies with uv"
cd /opt/dispatcharr
$STD uv venv --clear
$STD uv sync
$STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
$STD uv pip install gunicorn gevent celery redis daphne
msg_ok "Installed Python Dependencies"

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.drawio.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
setup_hwaccel
msg_info "Installing Dependencies"
$STD apt install -y tomcat11
msg_ok "Installed Dependencies"
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
motd_ssh
customize
cleanup_lxc

View File

@@ -31,10 +31,8 @@ setup_deb822_repo "matrix-org" \
"main"
echo "matrix-synapse-py3 matrix-synapse/server-name string $servername" | debconf-set-selections
echo "matrix-synapse-py3 matrix-synapse/report-stats boolean false" | debconf-set-selections
echo "exit 101" >/usr/sbin/policy-rc.d
chmod +x /usr/sbin/policy-rc.d
$STD apt install matrix-synapse-py3 -y
rm -f /usr/sbin/policy-rc.d
systemctl stop matrix-synapse
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/matrix-synapse/homeserver.yaml
sed -i 's/'\''::1'\'', //g' /etc/matrix-synapse/homeserver.yaml
SECRET=$(openssl rand -hex 32)

View File

@@ -38,18 +38,6 @@ rm -f "$DEB_FILE"
echo "$LATEST_VERSION" >~/.emqx
msg_ok "Installed EMQX"
read -r -p "${TAB3}Would you like to disable the EMQX MQ feature? (reduces disk/CPU usage) <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Disabling EMQX MQ feature"
mkdir -p /etc/emqx
if ! grep -q "^mq.enable" /etc/emqx/emqx.conf 2>/dev/null; then
echo "mq.enable = false" >>/etc/emqx/emqx.conf
else
sed -i 's/^mq.enable.*/mq.enable = false/' /etc/emqx/emqx.conf
fi
msg_ok "Disabled EMQX MQ feature"
fi
msg_info "Starting EMQX service"
$STD systemctl enable -q --now emqx
msg_ok "Enabled EMQX service"

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Authors: MickLesk (CanbiZ)
# Co-Authors: remz1337
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# Co-Author: remz1337
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/
@@ -14,221 +14,60 @@ setting_up_container
network_check
update_os
source /etc/os-release
if [[ "$VERSION_ID" != "12" ]]; then
msg_error "Frigate requires Debian 12 (Bookworm) due to Python 3.11 dependencies"
exit 1
fi
msg_info "Converting APT sources to DEB822 format"
if [ -f /etc/apt/sources.list ]; then
cat > /etc/apt/sources.list.d/debian.sources <<'EOF'
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm
Components: main contrib
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm-updates
Components: main contrib
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb
URIs: http://security.debian.org
Suites: bookworm-security
Components: main contrib
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
EOF
mv /etc/apt/sources.list /etc/apt/sources.list.bak
$STD apt update
fi
msg_ok "Converted APT sources"
msg_info "Installing Dependencies"
$STD apt install -y \
xz-utils \
python3 \
python3-dev \
python3-pip \
gcc \
pkg-config \
libhdf5-dev \
build-essential \
automake \
libtool \
ccache \
libusb-1.0-0-dev \
apt-transport-https \
cmake \
git \
libgtk-3-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libv4l-dev \
libxvidcore-dev \
libx264-dev \
libjpeg-dev \
libpng-dev \
libtiff-dev \
gfortran \
openexr \
libssl-dev \
libtbbmalloc2 \
libtbb-dev \
libdc1394-dev \
libopenexr-dev \
libgstreamer-plugins-base1.0-dev \
libgstreamer1.0-dev \
tclsh \
libopenblas-dev \
liblapack-dev \
make \
moreutils
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y {git,ca-certificates,automake,build-essential,xz-utils,libtool,ccache,pkg-config,libgtk-3-dev,libavcodec-dev,libavformat-dev,libswscale-dev,libv4l-dev,libxvidcore-dev,libx264-dev,libjpeg-dev,libpng-dev,libtiff-dev,gfortran,openexr,libatlas-base-dev,libssl-dev,libtbb2,libtbb-dev,libdc1394-22-dev,libopenexr-dev,libgstreamer-plugins-base1.0-dev,libgstreamer1.0-dev,gcc,gfortran,libopenblas-dev,liblapack-dev,libusb-1.0-0-dev,jq,moreutils}
msg_ok "Installed Dependencies"
msg_info "Setup Python3"
$STD apt-get install -y {python3,python3-dev,python3-setuptools,python3-distutils,python3-pip}
$STD pip install --upgrade pip
msg_ok "Setup Python3"
NODE_VERSION="22" setup_nodejs
msg_info "Installing go2rtc"
mkdir -p /usr/local/go2rtc/bin
cd /usr/local/go2rtc/bin
curl -fsSL "https://github.com/AlexxIT/go2rtc/releases/latest/download/go2rtc_linux_amd64" -o "go2rtc"
chmod +x go2rtc
$STD ln -svf /usr/local/go2rtc/bin/go2rtc /usr/local/bin/go2rtc
msg_ok "Installed go2rtc"
setup_hwaccel
export TARGETARCH="amd64"
export CCACHE_DIR=/root/.ccache
export CCACHE_MAXSIZE=2G
export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
export PIP_BREAK_SYSTEM_PACKAGES=1
export NVIDIA_VISIBLE_DEVICES=all
export NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
export TOKENIZERS_PARALLELISM=true
export TRANSFORMERS_NO_ADVISORY_WARNINGS=1
export OPENCV_FFMPEG_LOGLEVEL=8
export HAILORT_LOGGER_PATH=NONE
fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.16.4" "/opt/frigate"
msg_info "Building Nginx"
$STD bash /opt/frigate/docker/main/build_nginx.sh
sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
msg_ok "Built Nginx"
msg_info "Building SQLite Extensions"
$STD bash /opt/frigate/docker/main/build_sqlite_vec.sh
msg_ok "Built SQLite Extensions"
fetch_and_deploy_gh_release "go2rtc" "AlexxIT/go2rtc" "singlefile" "latest" "/usr/local/go2rtc/bin" "go2rtc_linux_amd64"
msg_info "Installing Tempio"
sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
$STD bash /opt/frigate/docker/main/install_tempio.sh
ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
msg_ok "Installed Tempio"
msg_info "Building libUSB"
fetch_and_deploy_gh_release "libusb" "libusb/libusb" "tarball" "v1.0.26" "/opt/libusb"
cd /opt/libusb
$STD ./bootstrap.sh
$STD ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared
$STD make -j "$(nproc)"
cd /opt/libusb/libusb
mkdir -p /usr/local/lib /usr/local/include/libusb-1.0 /usr/local/lib/pkgconfig
$STD bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la /usr/local/lib
install -c -m 644 libusb.h /usr/local/include/libusb-1.0
cd /opt/libusb/
install -c -m 644 libusb-1.0.pc /usr/local/lib/pkgconfig
ldconfig
msg_ok "Built libUSB"
msg_info "Installing Python Dependencies"
$STD pip3 install -r /opt/frigate/docker/main/requirements.txt
msg_ok "Installed Python Dependencies"
msg_info "Building Python Wheels (Patience)"
mkdir -p /wheels
sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh
$STD bash /opt/frigate/docker/main/build_pysqlite3.sh
for i in {1..3}; do
$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break
[[ $i -lt 3 ]] && sleep 10
done
msg_ok "Built Python Wheels"
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
msg_info "Downloading Inference Models"
mkdir -p /models /openvino-model
wget -q -O /edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
wget -q -O /models/cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
cp /opt/frigate/labelmap.txt /labelmap.txt
msg_ok "Downloaded Inference Models"
msg_info "Downloading Audio Model"
wget -q -O /tmp/yamnet.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download
$STD tar xzf /tmp/yamnet.tar.gz -C /
mv /1.tflite /cpu_audio_model.tflite
cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
rm -f /tmp/yamnet.tar.gz
msg_ok "Downloaded Audio Model"
msg_info "Installing HailoRT Runtime"
$STD bash /opt/frigate/docker/main/install_hailort.sh
msg_info "Installing Frigate v0.14.1 (Perseverance)"
cd ~
mkdir -p /opt/frigate/models
curl -fsSL "https://github.com/blakeblackshear/frigate/archive/refs/tags/v0.14.1.tar.gz" -o "frigate.tar.gz"
tar -xzf frigate.tar.gz -C /opt/frigate --strip-components 1
rm -rf frigate.tar.gz
cd /opt/frigate
$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt
cp -a /opt/frigate/docker/main/rootfs/. /
sed -i '/^.*unset DEBIAN_FRONTEND.*$/d' /opt/frigate/docker/main/install_deps.sh
echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections
echo "libedgetpu1-max libedgetpu/install-confirm-max boolean true" | debconf-set-selections
$STD bash /opt/frigate/docker/main/install_deps.sh
export TARGETARCH="amd64"
echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections
$STD /opt/frigate/docker/main/install_deps.sh
$STD apt update
$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg
$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffprobe /usr/local/bin/ffprobe
$STD pip3 install -U /wheels/*.whl
ldconfig
msg_ok "Installed HailoRT Runtime"
msg_info "Installing OpenVino"
$STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt
msg_ok "Installed OpenVino"
msg_info "Building OpenVino Model"
cd /models
wget -q http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz
$STD tar -zxf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner
if python3 /opt/frigate/docker/main/build_ov_model.py 2>&1; then
cp /models/ssdlite_mobilenet_v2.xml /openvino-model/
cp /models/ssdlite_mobilenet_v2.bin /openvino-model/
wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt
sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt
msg_ok "Built OpenVino Model"
else
msg_warn "OpenVino build failed (CPU may not support required instructions). Frigate will use CPU model."
fi
msg_info "Building Frigate Application (Patience)"
cd /opt/frigate
$STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt
$STD bash /opt/frigate/.devcontainer/initialize.sh
$STD /opt/frigate/.devcontainer/initialize.sh
$STD make version
cd /opt/frigate/web
$STD npm install
$STD npm run build
cp -r /opt/frigate/web/dist/* /opt/frigate/web/
sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
msg_ok "Built Frigate Application"
msg_info "Configuring Frigate"
mkdir -p /config /media/frigate
cp -r /opt/frigate/config/. /config
curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4"
echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
cat <<EOF >/etc/frigate.env
DEFAULT_FFMPEG_VERSION="7.0"
INCLUDED_FFMPEG_VERSIONS="7.0:5.0"
EOF
sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
cat <<EOF >/config/config.yml
mqtt:
enabled: false
cameras:
test:
ffmpeg:
#hwaccel_args: preset-vaapi
inputs:
- path: /media/frigate/person-bicycle-car-detection.mp4
input_args: -re -stream_loop -1 -fflags +genpts
@@ -239,42 +78,96 @@ cameras:
height: 1080
width: 1920
fps: 5
auth:
enabled: false
detect:
enabled: false
EOF
ln -sf /config/config.yml /opt/frigate/config/config.yml
if [[ "$CTTYPE" == "0" ]]; then
sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group
else
sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group
fi
echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
msg_ok "Installed Frigate"
if grep -q -o -m1 -E 'avx[^ ]*|sse4_2' /proc/cpuinfo; then
if grep -q -o -m1 -E 'avx[^ ]*' /proc/cpuinfo; then
msg_ok "AVX Support Detected"
msg_info "Installing Openvino Object Detection Model (Resilience)"
$STD pip install -r /opt/frigate/docker/main/requirements-ov.txt
cd /opt/frigate/models
export ENABLE_ANALYTICS=NO
$STD /usr/local/bin/omz_downloader --name ssdlite_mobilenet_v2 --num_attempts 2
$STD /usr/local/bin/omz_converter --name ssdlite_mobilenet_v2 --precision FP16 --mo /usr/local/bin/mo
cd /
cp -r /opt/frigate/models/public/ssdlite_mobilenet_v2 openvino-model
curl -fsSL "https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt" -o "openvino-model/coco_91cl_bkgr.txt"
sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
cat <<EOF >>/config/config.yml
ffmpeg:
hwaccel_args: auto
detectors:
detector01:
ov:
type: openvino
device: CPU
model:
path: /openvino-model/FP16/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
EOF
msg_ok "Installed Openvino Object Detection Model"
else
cat <<EOF >>/config/config.yml
ffmpeg:
hwaccel_args: auto
model:
path: /cpu_model.tflite
EOF
fi
msg_ok "Configured Frigate"
msg_info "Installing Coral Object Detection Model (Patience)"
cd /opt/frigate
export CCACHE_DIR=/root/.ccache
export CCACHE_MAXSIZE=2G
curl -fsSL "https://github.com/libusb/libusb/archive/v1.0.26.zip" -o "v1.0.26.zip"
$STD unzip v1.0.26.zip
rm v1.0.26.zip
cd libusb-1.0.26
$STD ./bootstrap.sh
$STD ./configure --disable-udev --enable-shared
$STD make -j $(nproc --all)
cd /opt/frigate/libusb-1.0.26/libusb
mkdir -p /usr/local/lib
$STD /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib'
mkdir -p /usr/local/include/libusb-1.0
$STD /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0'
ldconfig
cd /
curl -fsSL "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite" -o "edgetpu_model.tflite"
curl -fsSL "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite" -o "cpu_model.tflite"
cp /opt/frigate/labelmap.txt /labelmap.txt
curl -fsSL "https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download" -o "yamnet-tflite-classification-tflite-v1.tar.gz"
tar xzf yamnet-tflite-classification-tflite-v1.tar.gz
rm -rf yamnet-tflite-classification-tflite-v1.tar.gz
mv 1.tflite cpu_audio_model.tflite
cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
mkdir -p /media/frigate
curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4"
msg_ok "Installed Coral Object Detection Model"
msg_info "Building Nginx with Custom Modules"
$STD /opt/frigate/docker/main/build_nginx.sh
sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
msg_ok "Built Nginx"
msg_info "Installing Tempio"
sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
$STD /opt/frigate/docker/main/install_tempio.sh
ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
msg_ok "Installed Tempio"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/create_directories.service
[Unit]
Description=Create necessary directories for Frigate logs
Before=frigate.service go2rtc.service nginx.service
Description=Create necessary directories for logs
[Service]
Type=oneshot
@@ -283,11 +176,13 @@ ExecStart=/bin/bash -c '/bin/mkdir -p /dev/shm/logs/{frigate,go2rtc,nginx} && /b
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now create_directories
sleep 3
cat <<EOF >/etc/systemd/system/go2rtc.service
[Unit]
Description=go2rtc streaming service
After=network.target create_directories.service
Description=go2rtc service
After=network.target
After=create_directories.service
StartLimitIntervalSec=0
[Service]
@@ -295,8 +190,7 @@ Type=simple
Restart=always
RestartSec=1
User=root
EnvironmentFile=/etc/frigate.env
ExecStartPre=+rm -f /dev/shm/logs/go2rtc/current
ExecStartPre=+rm /dev/shm/logs/go2rtc/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/go2rtc/current
StandardError=file:/dev/shm/logs/go2rtc/current
@@ -304,11 +198,13 @@ StandardError=file:/dev/shm/logs/go2rtc/current
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now go2rtc
sleep 3
cat <<EOF >/etc/systemd/system/frigate.service
[Unit]
Description=Frigate NVR service
After=go2rtc.service create_directories.service
Description=Frigate service
After=go2rtc.service
After=create_directories.service
StartLimitIntervalSec=0
[Service]
@@ -316,8 +212,8 @@ Type=simple
Restart=always
RestartSec=1
User=root
EnvironmentFile=/etc/frigate.env
ExecStartPre=+rm -f /dev/shm/logs/frigate/current
# Environment=PLUS_API_KEY=
ExecStartPre=+rm /dev/shm/logs/frigate/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/frigate/current
StandardError=file:/dev/shm/logs/frigate/current
@@ -325,11 +221,13 @@ StandardError=file:/dev/shm/logs/frigate/current
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now frigate
sleep 3
cat <<EOF >/etc/systemd/system/nginx.service
[Unit]
Description=Nginx reverse proxy for Frigate
After=frigate.service create_directories.service
Description=Nginx service
After=frigate.service
After=create_directories.service
StartLimitIntervalSec=0
[Service]
@@ -337,7 +235,7 @@ Type=simple
Restart=always
RestartSec=1
User=root
ExecStartPre=+rm -f /dev/shm/logs/nginx/current
ExecStartPre=+rm /dev/shm/logs/nginx/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/nginx/current
StandardError=file:/dev/shm/logs/nginx/current
@@ -345,20 +243,8 @@ StandardError=file:/dev/shm/logs/nginx/current
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable -q --now create_directories
sleep 2
systemctl enable -q --now go2rtc
sleep 2
systemctl enable -q --now frigate
sleep 2
systemctl enable -q --now nginx
msg_ok "Created Services"
msg_info "Cleaning Up"
rm -rf /opt/libusb /wheels /models/*.tar.gz
msg_ok "Cleaned Up"
msg_ok "Configured Services"
motd_ssh
customize

View File

@@ -289,7 +289,7 @@ ML_DIR="${APP_DIR}/machine-learning"
GEO_DIR="${INSTALL_DIR}/geodata"
mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache}
fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.6" "$SRC_DIR"
fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.5" "$SRC_DIR"
PNPM_VERSION="$(jq -r '.packageManager | split("@")[1]' ${SRC_DIR}/package.json)"
NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs

View File

@@ -20,19 +20,10 @@ msg_ok "Installed Docker"
msg_info "Detecting latest Kasm Workspaces release"
KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1)
if [[ -z "$KASM_URL" ]]; then
SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1)
if [[ -n "$SERVICE_IMAGE_URL" ]]; then
KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz"
fi
else
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
fi
if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
msg_error "Unable to detect latest Kasm release URL."
exit 1
fi
KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
msg_ok "Detected Kasm Workspaces version $KASM_VERSION"
msg_warn "WARNING: This script will run an external installer from a third-party source (https://www.kasmweb.com/)."

View File

@@ -37,13 +37,18 @@ PYTHON_VERSION="3.12" setup_uv
fetch_and_deploy_gh_release "libretranslate" "LibreTranslate/LibreTranslate" "tarball"
msg_info "Setup LibreTranslate (Patience)"
TORCH_VERSION=$(grep -Eo '"torch ==[0-9]+\.[0-9]+\.[0-9]+' /opt/libretranslate/pyproject.toml |
tail -n1 | sed 's/.*==//')
if [[ -z "$TORCH_VERSION" ]]; then
TORCH_VERSION="2.5.0"
fi
cd /opt/libretranslate
$STD uv venv --clear .venv --python 3.12
$STD source .venv/bin/activate
$STD uv pip install --upgrade pip
$STD uv pip install "setuptools<81"
$STD uv pip install --upgrade pip setuptools
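# (editor note, assumption) the "setuptools<81" pin likely exists because newer
# setuptools releases begin removing pkg_resources, which parts of the LibreTranslate
# dependency tree still import at runtime.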
$STD uv pip install Babel==2.12.1
$STD .venv/bin/python scripts/compile_locales.py
$STD uv pip install "torch==${TORCH_VERSION}" --extra-index-url https://download.pytorch.org/whl/cpu
$STD uv pip install "numpy<2"
$STD uv pip install .
$STD uv pip install libretranslate

View File

@@ -38,10 +38,6 @@ for server in "${servers[@]}"; do
fi
done
msg_info "Installing dependencies"
$STD apt install -y inotify-tools
msg_ok "Installed dependencies"
msg_info "Installing Collabora Online"
curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg
cat <<EOF >/etc/apt/sources.list.d/colloboraonline.sources
@@ -152,15 +148,8 @@ COLLABORATION_JWT_SECRET=
# FRONTEND_FULL_TEXT_SEARCH_ENABLED=true
# SEARCH_EXTRACTOR_TIKA_TIKA_URL=<your-tika-url>
## Uncomment below to enable PosixFS Collaborative Mode
## Increase inotify watch/instance limits on your PVE host:
### sysctl -w fs.inotify.max_user_watches=1048576
### sysctl -w fs.inotify.max_user_instances=1024
# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true
# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait
# STORAGE_USERS_POSIX_WATCH_FS=true
# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>
## User files location - experimental - use at your own risk! - ZFS, NFS v4.2+ supported - CIFS/SMB not supported
## External storage test - Only NFS v4.2+ is supported
## User files
# STORAGE_USERS_POSIX_ROOT=<path-to-your-bind_mount>
EOF

View File

@@ -24,7 +24,7 @@ setup_hwaccel
PYTHON_VERSION="3.12" setup_uv
msg_info "Installing Open WebUI"
$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
$STD uv tool install --python 3.12 open-webui[all]
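# (editor note) The process substitution above hands uv a one-line constraints file;
# the numba>=0.60 floor is presumably there to guarantee a numba release with
# Python 3.12 support.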
msg_ok "Installed Open WebUI"
read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt

View File

@@ -178,7 +178,7 @@ http:
servers:
- url: "http://$LOCAL_IP:3000"
EOF
$STD npm run db:push
$STD npm run db:sqlite:push
. /etc/os-release
if [ "$VERSION_CODENAME" = "trixie" ]; then

View File

@@ -14,51 +14,42 @@ network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y apache2-utils
$STD apt install -y \
apache2-utils \
python3-pip \
python3-venv
msg_ok "Installed Dependencies"
PYTHON_VERSION="3.13" setup_uv
fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
msg_info "Setting up Radicale"
cd /opt/radicale
python3 -m venv /opt/radicale
source /opt/radicale/bin/activate
$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
$STD htpasswd -c -b -5 /opt/radicale/users admin "$RNDPASS"
$STD htpasswd -c -b -5 /opt/radicale/users admin $RNDPASS
{
echo "Radicale Credentials"
echo "Admin User: admin"
echo "Admin Password: $RNDPASS"
} >>~/radicale.creds
msg_ok "Done setting up Radicale"
mkdir -p /etc/radicale
cat <<EOF >/etc/radicale/config
[server]
hosts = 0.0.0.0:5232
msg_info "Setup Service"
[auth]
type = htpasswd
htpasswd_filename = /opt/radicale/users
htpasswd_encryption = sha512
[storage]
type = multifilesystem
filesystem_folder = /var/lib/radicale/collections
[web]
type = internal
cat <<EOF >/opt/radicale/start.sh
#!/usr/bin/env bash
source /opt/radicale/bin/activate
python3 -m radicale --storage-filesystem-folder=/var/lib/radicale/collections --hosts 0.0.0.0:5232 --auth-type htpasswd --auth-htpasswd-filename /opt/radicale/users --auth-htpasswd-encryption sha512
EOF
msg_ok "Set up Radicale"
msg_info "Creating Service"
chmod +x /opt/radicale/start.sh
cat <<EOF >/etc/systemd/system/radicale.service
[Unit]
Description=A simple CalDAV (calendar) and CardDAV (contact) server
After=network.target
Requires=network.target
[Service]
WorkingDirectory=/opt/radicale
ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config
ExecStart=/opt/radicale/start.sh
Restart=on-failure
# User=radicale
# Deny other users access to the calendar data

View File

@@ -3,7 +3,7 @@
# Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/slskd/slskd/, https://github.com/mrusse/soularr
# Source: https://github.com/slskd/slskd/, https://soularr.net
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
@@ -13,71 +13,71 @@ setting_up_container
network_check
update_os
fetch_and_deploy_gh_release "Slskd" "slskd/slskd" "prebuild" "latest" "/opt/slskd" "slskd-*-linux-x64.zip"
msg_info "Installing Dependencies"
$STD apt install -y \
python3-pip
msg_ok "Installed Dependencies"
msg_info "Configuring Slskd"
msg_info "Setup ${APPLICATION}"
tmp_file=$(mktemp)
RELEASE=$(curl -s https://api.github.com/repos/slskd/slskd/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
curl -fsSL "https://github.com/slskd/slskd/releases/download/${RELEASE}/slskd-${RELEASE}-linux-x64.zip" -o $tmp_file
$STD unzip $tmp_file -d /opt/${APPLICATION}
echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
JWT_KEY=$(openssl rand -base64 44)
SLSKD_API_KEY=$(openssl rand -base64 44)
cp /opt/slskd/config/slskd.example.yml /opt/slskd/config/slskd.yml
cp /opt/${APPLICATION}/config/slskd.example.yml /opt/${APPLICATION}/config/slskd.yml
sed -i \
-e '/web:/,/cidr/s/^# //' \
-e '/https:/,/port: 5031/s/false/true/' \
-e '/port: 5030/,/socket/s/,.*$//' \
-e '/content_path:/,/authentication/s/false/true/' \
-e "\|web:|,\|cidr|s|^#||" \
-e "\|https:|,\|5031|s|false|true|" \
-e "\|api_keys|,\|cidr|s|<some.*$|$SLSKD_API_KEY|; \
s|role: readonly|role: readwrite|; \
s|0.0.0.0/0,::/0|& # Replace this with your subnet|" \
-e "\|soulseek|,\|write_queue|s|^#||" \
-e "\|jwt:|,\|ttl|s|key: ~|key: $JWT_KEY|" \
-e '/soulseek/,/write_queue/s/^# //' \
-e 's/^.*picture/#&/' /opt/slskd/config/slskd.yml
msg_ok "Configured Slskd"
-e "s|^ picture|# picture|" \
/opt/${APPLICATION}/config/slskd.yml
msg_ok "Setup ${APPLICATION}"
read -rp "${TAB3}Do you want to install Soularr? y/N " soularr
if [[ ${soularr,,} =~ ^(y|yes)$ ]]; then
PYTHON_VERSION="3.11" setup_uv
fetch_and_deploy_gh_release "Soularr" "mrusse/soularr" "tarball" "latest" "/opt/soularr"
cd /opt/soularr
$STD uv venv venv
$STD source venv/bin/activate
$STD uv pip install -r requirements.txt
sed -i \
-e "\|[Slskd]|,\|host_url|s|yourslskdapikeygoeshere|$SLSKD_API_KEY|" \
-e "/host_url/s/slskd/localhost/" \
/opt/soularr/config.ini
cat <<EOF >/opt/soularr/run.sh
#!/usr/bin/env bash
msg_info "Installing Soularr"
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
cd /tmp
curl -fsSL -o main.zip https://github.com/mrusse/soularr/archive/refs/heads/main.zip
$STD unzip main.zip
mv soularr-main /opt/soularr
cd /opt/soularr
$STD pip install -r requirements.txt
sed -i \
-e "\|[Slskd]|,\|host_url|s|yourslskdapikeygoeshere|$SLSKD_API_KEY|" \
-e "/host_url/s/slskd/localhost/" \
/opt/soularr/config.ini
sed -i \
-e "/#This\|#Default\|INTERVAL/{N;d;}" \
-e "/while\|#Pass/d" \
-e "\|python|s|app|opt/soularr|; s|python|python3|" \
-e "/dt/,+2d" \
/opt/soularr/run.sh
sed -i -E "/(soularr.py)/s/.{5}$//; /if/,/fi/s/.{4}//" /opt/soularr/run.sh
chmod +x /opt/soularr/run.sh
msg_ok "Installed Soularr"
if ps aux | grep "[s]oularr.py" >/dev/null; then
echo "Soularr is already running. Exiting..."
exit 1
else
source /opt/soularr/venv/bin/activate
uv run python3 -u /opt/soularr/soularr.py --config-dir /opt/soularr
fi
EOF
chmod +x /opt/soularr/run.sh
deactivate
msg_ok "Installed Soularr"
fi
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/slskd.service
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/${APPLICATION}.service
[Unit]
Description=Slskd Service
Description=${APPLICATION} Service
After=network.target
Wants=network.target
[Service]
WorkingDirectory=/opt/slskd
ExecStart=/opt/slskd/slskd --config /opt/slskd/config/slskd.yml
WorkingDirectory=/opt/${APPLICATION}
ExecStart=/opt/${APPLICATION}/slskd --config /opt/${APPLICATION}/config/slskd.yml
Restart=always
[Install]
WantedBy=multi-user.target
EOF
if [[ -d /opt/soularr ]]; then
cat <<EOF >/etc/systemd/system/soularr.timer
cat <<EOF >/etc/systemd/system/soularr.timer
[Unit]
Description=Soularr service timer
RefuseManualStart=no
@@ -85,15 +85,15 @@ RefuseManualStop=no
[Timer]
Persistent=true
# run every 10 minutes
OnCalendar=*-*-* *:0/10:00
# run every 5 minutes
OnCalendar=*-*-* *:0/5:00
Unit=soularr.service
[Install]
WantedBy=timers.target
EOF
cat <<EOF >/etc/systemd/system/soularr.service
cat <<EOF >/etc/systemd/system/soularr.service
[Unit]
Description=Soularr service
After=network.target slskd.service
@@ -106,9 +106,10 @@ ExecStart=/bin/bash -c /opt/soularr/run.sh
[Install]
WantedBy=multi-user.target
EOF
msg_warn "Add your Lidarr API key to Soularr in '/opt/soularr/config.ini', then run 'systemctl enable --now soularr.timer'"
fi
systemctl enable -q --now slskd
systemctl enable -q --now ${APPLICATION}
systemctl enable -q soularr.timer
rm -rf $tmp_file
rm -rf /tmp/main.zip
msg_ok "Created Services"
motd_ssh

View File

@@ -15,18 +15,16 @@ update_os
msg_info "Installing Dependencies"
$STD apt install -y apt-transport-https
curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
Types: deb
URIs: https://www.ui.com/downloads/unifi/debian
Suites: stable
Components: ubiquiti
Architectures: amd64
Signed-By: /usr/share/keyrings/unifi-repo.gpg
EOF
$STD apt update
msg_ok "Installed Dependencies"
setup_deb822_repo \
"unifi" \
"https://dl.ui.com/unifi/unifi-repo.gpg" \
"https://www.ui.com/downloads/unifi/debian" \
"stable" \
"ubiquiti" \
"amd64"
JAVA_VERSION="21" setup_java
if lscpu | grep -q 'avx'; then

View File

@@ -28,15 +28,15 @@ fetch_and_deploy_gh_release "wealthfolio" "afadil/wealthfolio" "tarball"
msg_info "Building Frontend (patience)"
cd /opt/wealthfolio
export BUILD_TARGET=web
$STD pnpm install --frozen-lockfile
$STD pnpm tsc
$STD pnpm vite build
$STD pnpm --filter frontend... build
msg_ok "Built Frontend"
msg_info "Building Backend (patience)"
cd /opt/wealthfolio/src-server
$STD cargo build --release --manifest-path Cargo.toml
cp /opt/wealthfolio/src-server/target/release/wealthfolio-server /usr/local/bin/wealthfolio-server
source ~/.cargo/env
$STD cargo build --release --manifest-path apps/server/Cargo.toml
cp /opt/wealthfolio/target/release/wealthfolio-server /usr/local/bin/wealthfolio-server
chmod +x /usr/local/bin/wealthfolio-server
msg_ok "Built Backend"
@@ -58,7 +58,7 @@ echo "WF_PASSWORD=${WF_PASSWORD}" >~/wealthfolio.creds
msg_ok "Configured Wealthfolio"
msg_info "Cleaning Up"
rm -rf /opt/wealthfolio/src-server/target
rm -rf /opt/wealthfolio/target
rm -rf /root/.cargo/registry
rm -rf /opt/wealthfolio/node_modules
msg_ok "Cleaned Up"

File diff suppressed because it is too large

View File

@@ -3078,10 +3078,10 @@ settings_menu() {
case "$choice" in
1) diagnostics_menu ;;
2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
2) nano /usr/local/community-scripts/default.vars ;;
3)
if [ -f "$(get_app_defaults_path)" ]; then
${EDITOR:-nano} "$(get_app_defaults_path)"
nano "$(get_app_defaults_path)"
else
# Back was selected (no app.vars available)
return
@@ -3636,9 +3636,6 @@ $PCT_OPTIONS_STRING"
exit 214
fi
msg_ok "Storage space validated"
# Report installation start to API (early - captures failed installs too)
post_to_api
fi
create_lxc_container || exit $?
@@ -4013,9 +4010,6 @@ EOF'
# Install SSH keys
install_ssh_keys_into_ct
# Start timer for duration tracking
start_install_timer
# Run application installer
# Disable error trap - container errors are handled internally via flag file
set +Eeuo pipefail # Disable ALL error handling temporarily
@@ -4046,10 +4040,9 @@ EOF'
if [[ $install_exit_code -ne 0 ]]; then
msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"
# Copy install log from container BEFORE API call so get_error_text() can read it
# Copy both logs from container before potential deletion
local build_log_copied=false
local install_log_copied=false
local host_install_log="/tmp/install-lxc-${CTID}-${SESSION_ID}.log"
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
# Copy BUILD_LOG (creation log) if it exists
@@ -4057,22 +4050,15 @@ EOF'
cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
fi
# Copy INSTALL_LOG from container to host
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$host_install_log" 2>/dev/null; then
# Copy INSTALL_LOG from container
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
install_log_copied=true
# Point INSTALL_LOG to host copy so get_error_text() finds it
INSTALL_LOG="$host_install_log"
fi
fi
# Report failure to telemetry API (now with log available on host)
post_update_to_api "failed" "$install_exit_code"
# Show available logs
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
# Show available logs
echo ""
[[ "$build_log_copied" == true ]] && echo -e "${GN}${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
[[ "$install_log_copied" == true ]] && echo -e "${GN}${CL} Installation log: ${BL}${host_install_log}${CL}"
[[ "$install_log_copied" == true ]] && echo -e "${GN}${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
fi
# Dev mode: Keep container or open breakpoint shell
@@ -5137,9 +5123,9 @@ EOF
# api_exit_script()
#
# - Exit trap handler for reporting to API telemetry
# - Captures exit code and reports to PocketBase using centralized error descriptions
# - Uses explain_exit_code() from api.func for consistent error messages
# - Posts failure status with exit code to API (error description resolved automatically)
# - Captures exit code and reports to API using centralized error descriptions
# - Uses explain_exit_code() from error_handler.func for consistent error messages
# - Posts failure status with exit code to API (error description added automatically)
# - Only executes on non-zero exit codes
# ------------------------------------------------------------------------------
api_exit_script() {
@@ -5152,6 +5138,6 @@ api_exit_script() {
if command -v pveversion >/dev/null 2>&1; then
trap 'api_exit_script' EXIT
fi
trap 'post_update_to_api "failed" "$?"' ERR
trap 'post_update_to_api "failed" "130"' SIGINT
trap 'post_update_to_api "failed" "143"' SIGTERM
trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM

View File

@@ -27,90 +27,100 @@
# ------------------------------------------------------------------------------
# explain_exit_code()
#
# - Canonical version is defined in api.func (sourced before this file)
# - This section only provides a fallback if api.func was not loaded
# - See api.func SECTION 1 for the authoritative exit code mappings
# - Maps numeric exit codes to human-readable error descriptions
# - Supports:
# * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
# * Package manager errors (APT, DPKG: 100, 101, 255)
# * Node.js/npm errors (243-249, 254)
# * Python/pip/uv errors (210-212)
# * PostgreSQL errors (231-234)
# * MySQL/MariaDB errors (241-244)
# * MongoDB errors (251-254)
# * Proxmox custom codes (200-231)
# - Returns description string for given exit code
# ------------------------------------------------------------------------------
if ! declare -f explain_exit_code &>/dev/null; then
explain_exit_code() {
local code="$1"
case "$code" in
1) echo "General error / Operation not permitted" ;;
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
6) echo "curl: DNS resolution failed (could not resolve host)" ;;
7) echo "curl: Failed to connect (network unreachable / host down)" ;;
22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
28) echo "curl: Operation timeout (network slow or server not responding)" ;;
35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
124) echo "Command timed out (timeout command)" ;;
126) echo "Command invoked cannot execute (permission problem?)" ;;
127) echo "Command not found" ;;
128) echo "Invalid argument to exit" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
137) echo "Killed (SIGKILL / Out of memory?)" ;;
139) echo "Segmentation fault (core dumped)" ;;
141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;;
143) echo "Terminated (SIGTERM)" ;;
150) echo "Systemd: Service failed to start" ;;
151) echo "Systemd: Service unit not found" ;;
152) echo "Permission denied (EACCES)" ;;
153) echo "Build/compile failed (make/gcc/cmake)" ;;
154) echo "Node.js: Native addon build failed (node-gyp)" ;;
160) echo "Python: Virtualenv / uv environment missing or broken" ;;
161) echo "Python: Dependency resolution failed" ;;
162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
171) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
172) echo "PostgreSQL: Database does not exist" ;;
173) echo "PostgreSQL: Fatal error in query / syntax" ;;
180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
182) echo "MySQL/MariaDB: Database does not exist" ;;
183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
190) echo "MongoDB: Connection failed (server not running)" ;;
191) echo "MongoDB: Authentication failed (bad user/password)" ;;
192) echo "MongoDB: Database not found" ;;
193) echo "MongoDB: Fatal query error" ;;
200) echo "Proxmox: Failed to create lock file" ;;
203) echo "Proxmox: Missing CTID variable" ;;
204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
205) echo "Proxmox: Invalid CTID (<100)" ;;
206) echo "Proxmox: CTID already in use" ;;
207) echo "Proxmox: Password contains unescaped special characters" ;;
208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
209) echo "Proxmox: Container creation failed" ;;
210) echo "Proxmox: Cluster not quorate" ;;
211) echo "Proxmox: Timeout waiting for template lock" ;;
212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
214) echo "Proxmox: Not enough storage space" ;;
215) echo "Proxmox: Container created but not listed (ghost state)" ;;
216) echo "Proxmox: RootFS entry missing in config" ;;
217) echo "Proxmox: Storage not accessible" ;;
218) echo "Proxmox: Template file corrupted or incomplete" ;;
219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
220) echo "Proxmox: Unable to resolve template path" ;;
221) echo "Proxmox: Template file not readable" ;;
222) echo "Proxmox: Template download failed" ;;
223) echo "Proxmox: Template not available after download" ;;
224) echo "Proxmox: PBS storage is for backups only" ;;
225) echo "Proxmox: No template available for OS/Version" ;;
231) echo "Proxmox: LXC stack upgrade failed" ;;
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
245) echo "Node.js: Invalid command-line option" ;;
246) echo "Node.js: Internal JavaScript Parse Error" ;;
247) echo "Node.js: Fatal internal error" ;;
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
255) echo "DPKG: Fatal internal error" ;;
*) echo "Unknown error" ;;
esac
}
fi
explain_exit_code() {
local code="$1"
case "$code" in
# --- Generic / Shell ---
1) echo "General error / Operation not permitted" ;;
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
126) echo "Command invoked cannot execute (permission problem?)" ;;
127) echo "Command not found" ;;
128) echo "Invalid argument to exit" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
137) echo "Killed (SIGKILL / Out of memory?)" ;;
139) echo "Segmentation fault (core dumped)" ;;
143) echo "Terminated (SIGTERM)" ;;
# --- Package manager / APT / DPKG ---
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
255) echo "DPKG: Fatal internal error" ;;
# --- Node.js / npm / pnpm / yarn ---
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
245) echo "Node.js: Invalid command-line option" ;;
246) echo "Node.js: Internal JavaScript Parse Error" ;;
247) echo "Node.js: Fatal internal error" ;;
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
249) echo "Node.js: Inspector error" ;;
254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
# --- Python / pip / uv ---
210) echo "Python: Virtualenv / uv environment missing or broken" ;;
211) echo "Python: Dependency resolution failed" ;;
212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
# --- PostgreSQL ---
231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
233) echo "PostgreSQL: Database does not exist" ;;
234) echo "PostgreSQL: Fatal error in query / syntax" ;;
# --- MySQL / MariaDB ---
241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
243) echo "MySQL/MariaDB: Database does not exist" ;;
244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
# --- MongoDB ---
251) echo "MongoDB: Connection failed (server not running)" ;;
252) echo "MongoDB: Authentication failed (bad user/password)" ;;
253) echo "MongoDB: Database not found" ;;
254) echo "MongoDB: Fatal query error" ;;
# --- Proxmox Custom Codes ---
200) echo "Proxmox: Failed to create lock file" ;;
203) echo "Proxmox: Missing CTID variable" ;;
204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
205) echo "Proxmox: Invalid CTID (<100)" ;;
206) echo "Proxmox: CTID already in use" ;;
207) echo "Proxmox: Password contains unescaped special characters" ;;
208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
209) echo "Proxmox: Container creation failed" ;;
210) echo "Proxmox: Cluster not quorate" ;;
211) echo "Proxmox: Timeout waiting for template lock" ;;
212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
214) echo "Proxmox: Not enough storage space" ;;
215) echo "Proxmox: Container created but not listed (ghost state)" ;;
216) echo "Proxmox: RootFS entry missing in config" ;;
217) echo "Proxmox: Storage not accessible" ;;
219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
224) echo "Proxmox: PBS storage is for backups only" ;;
218) echo "Proxmox: Template file corrupted or incomplete" ;;
220) echo "Proxmox: Unable to resolve template path" ;;
221) echo "Proxmox: Template file not readable" ;;
222) echo "Proxmox: Template download failed" ;;
223) echo "Proxmox: Template not available after download" ;;
225) echo "Proxmox: No template available for OS/Version" ;;
231) echo "Proxmox: LXC stack upgrade failed" ;;
# --- Default ---
*) echo "Unknown error" ;;
esac
}
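# Example (editor's sketch; some_install_step is a hypothetical command): resolving a
# captured exit status to a readable message via the mapping above.
some_install_step || {
  rc=$?
  echo "Step failed: $(explain_exit_code "$rc") (exit code ${rc})"
}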
# ==============================================================================
# SECTION 2: ERROR HANDLERS
@@ -187,7 +197,12 @@ error_handler() {
# Create error flag file with exit code for host detection
echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
# Log path is shown by host as combined log - no need to show container path
if declare -f msg_custom >/dev/null 2>&1; then
msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
else
echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
fi
else
# HOST CONTEXT: Show local log path and offer container cleanup
if declare -f msg_custom >/dev/null 2>&1; then
@@ -198,11 +213,6 @@ error_handler() {
# Offer to remove container if it exists (build errors after container creation)
if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then
# Report failure to API before container cleanup
if declare -f post_update_to_api &>/dev/null; then
post_update_to_api "failed" "$exit_code"
fi
echo ""
echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
@@ -243,18 +253,6 @@ error_handler() {
# ------------------------------------------------------------------------------
on_exit() {
local exit_code=$?
# Report orphaned "installing" records to telemetry API
# Catches ALL exit paths: errors (non-zero), signals, AND clean exits where
# post_to_api was called ("installing" sent) but post_update_to_api was never called
if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
if declare -f post_update_to_api >/dev/null 2>&1; then
if [[ $exit_code -ne 0 ]]; then
post_update_to_api "failed" "$exit_code"
else
post_update_to_api "failed" "1"
fi
fi
fi
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code"
}
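# (editor's sketch, assumption) the handlers above are registered along these lines:
#   trap on_exit EXIT
#   trap on_interrupt INT
#   trap on_terminate TERM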
@@ -267,10 +265,6 @@ on_exit() {
# - Exits with code 130 (128 + SIGINT=2)
# ------------------------------------------------------------------------------
on_interrupt() {
# Report interruption to telemetry API (prevents stuck "installing" records)
if declare -f post_update_to_api >/dev/null 2>&1; then
post_update_to_api "failed" "130"
fi
if declare -f msg_error >/dev/null 2>&1; then
msg_error "Interrupted by user (SIGINT)"
else
@@ -288,10 +282,6 @@ on_interrupt() {
# - Triggered by external process termination
# ------------------------------------------------------------------------------
on_terminate() {
# Report termination to telemetry API (prevents stuck "installing" records)
if declare -f post_update_to_api >/dev/null 2>&1; then
post_update_to_api "failed" "143"
fi
if declare -f msg_error >/dev/null 2>&1; then
msg_error "Terminated by signal (SIGTERM)"
else

View File

@@ -465,7 +465,6 @@ manage_tool_repository() {
msg_error "Failed to download MongoDB GPG key"
return 1
fi
chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"
# Setup repository
local distro_codename
@@ -1295,33 +1294,12 @@ setup_deb822_repo() {
return 1
}
# Import GPG key (auto-detect binary vs ASCII-armored format)
local tmp_gpg
tmp_gpg=$(mktemp) || return 1
curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
msg_error "Failed to download GPG key for ${name}"
rm -f "$tmp_gpg"
# Import GPG
curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
msg_error "Failed to import GPG key for ${name}"
return 1
}
if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
# ASCII-armored — dearmor to binary
gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" < "$tmp_gpg" || {
msg_error "Failed to dearmor GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
else
# Already in binary GPG format — copy directly
cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
msg_error "Failed to install GPG key for ${name}"
rm -f "$tmp_gpg"
return 1
}
fi
rm -f "$tmp_gpg"
chmod 644 "/etc/apt/keyrings/${name}.gpg"
# Write deb822
{
echo "Types: deb"

View File

@@ -75,37 +75,14 @@ pct exec "$CTID" -- bash -c '
set -e
export DEBIAN_FRONTEND=noninteractive
# Source os-release properly (handles quoted values)
source /etc/os-release
ID=$(grep "^ID=" /etc/os-release | cut -d"=" -f2)
VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2)
# Fallback if DNS is poisoned or blocked
# fallback if DNS is poisoned or blocked
ORIG_RESOLV="/etc/resolv.conf"
BACKUP_RESOLV="/tmp/resolv.conf.backup"
# Check DNS resolution using multiple methods (dig may not be installed)
dns_check_failed=true
if command -v dig &>/dev/null; then
if dig +short pkgs.tailscale.com 2>/dev/null | grep -qvE "^127\.|^0\.0\.0\.0$|^$"; then
dns_check_failed=false
fi
elif command -v host &>/dev/null; then
if host pkgs.tailscale.com 2>/dev/null | grep -q "has address"; then
dns_check_failed=false
fi
elif command -v nslookup &>/dev/null; then
if nslookup pkgs.tailscale.com 2>/dev/null | grep -q "Address:"; then
dns_check_failed=false
fi
elif command -v getent &>/dev/null; then
if getent hosts pkgs.tailscale.com &>/dev/null; then
dns_check_failed=false
fi
else
# No DNS tools available, try curl directly and assume DNS works
dns_check_failed=false
fi
if $dns_check_failed; then
if ! dig +short pkgs.tailscale.com | grep -qvE "^127\.|^0\.0\.0\.0$"; then
echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)."
echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)"
cp "$ORIG_RESOLV" "$BACKUP_RESOLV"
@@ -115,22 +92,17 @@ fi
if ! command -v curl &>/dev/null; then
echo "[INFO] curl not found, installing..."
apt-get update -qq
apt update -qq
apt install -y curl >/dev/null
apt-get install -y curl >/dev/null
fi
# Ensure keyrings directory exists
mkdir -p /usr/share/keyrings
curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VERSION_CODENAME}.noarmor.gpg" \
curl -fsSL https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg \
| tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VERSION_CODENAME} main" \
echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
>/etc/apt/sources.list.d/tailscale.list
apt-get update -qq
apt update -qq
apt install -y tailscale >/dev/null
apt-get install -y tailscale >/dev/null
if [[ -f /tmp/resolv.conf.backup ]]; then
echo "[INFO] Restoring original /etc/resolv.conf"

View File

@@ -131,7 +131,7 @@ function detect_service() {
function backup_container() {
msg_info "Creating backup for container $1"
vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "{{guestname}} - community-scripts backup updater" >/dev/null 2>&1
vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "community-scripts backup updater" >/dev/null 2>&1
status=$?
if [ $status -eq 0 ]; then
@@ -151,11 +151,11 @@ function get_backup_storages() {
split($0, a, ":")
type = a[1]
name = a[2]
gsub(/^[ \t]+|[ \t]+$/, "", name)
sub(/^ +/, "", name)
has_content = 0
has_backup = 0
}
/^[ \t]*content/ {
/^ +content/ {
has_content = 1
if ($0 ~ /backup/) has_backup = 1
}

View File

@@ -70,7 +70,7 @@ function error_handler() {
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
post_update_to_api "failed" "${exit_code}"
post_update_to_api "failed" "${command}"
echo -e "\n$error_message\n"
cleanup_vmid
}
@@ -203,6 +203,7 @@ function exit-script() {
function default_settings() {
VMID=$(get_valid_nextid)
FORMAT=",efitype=4m"
MACHINE=""
DISK_SIZE="4G"
DISK_CACHE=""
@@ -258,9 +259,11 @@ function advanced_settings() {
3>&1 1>&2 2>&3); then
if [ "$MACH" = q35 ]; then
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
FORMAT=""
MACHINE=" -machine q35"
else
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
FORMAT=",efitype=4m"
MACHINE=""
fi
else
@@ -473,45 +476,31 @@ case $STORAGE_TYPE in
nfs | dir | cifs)
DISK_EXT=".qcow2"
DISK_REF="$VMID/"
DISK_IMPORT="--format qcow2"
DISK_IMPORT="-format qcow2"
THIN=""
;;
btrfs)
DISK_EXT=".raw"
DISK_REF="$VMID/"
DISK_IMPORT="--format raw"
DISK_IMPORT="-format raw"
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="--format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"
eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
done
msg_info "Creating a Arch Linux VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
if qm disk import --help >/dev/null 2>&1; then
IMPORT_CMD=(qm disk import)
else
IMPORT_CMD=(qm importdisk)
fi
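# (editor note) `qm disk import` is the current subcommand on newer Proxmox VE
# releases, while `qm importdisk` is the legacy spelling; probing --help picks
# whichever this host provides.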
IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
[[ -z "$DISK_REF_IMPORTED" ]] && {
msg_error "Unable to determine imported disk reference."
echo "$IMPORT_OUT"
exit 1
}
msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
qm set $VMID \
-efidisk0 ${STORAGE}:0,efitype=4m \
-scsi0 ${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,} \
-efidisk0 ${DISK0_REF}${FORMAT} \
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
-ide2 ${STORAGE}:cloudinit \
-boot order=scsi0 \
-serial0 socket >/dev/null
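
A note on the for-loop in this hunk, since the indirection is easy to misread: eval composes the variable names DISK0/DISK1 dynamically, and ${!disk} expands to the value of the variable whose name is stored in disk. A standalone sketch with invented values:

#!/usr/bin/env bash
# Standalone sketch of the DISK loop's indirection (VMID/STORAGE invented).
VMID=100 STORAGE=local-lvm DISK_EXT="" DISK_REF=""
for i in 0 1; do
  disk="DISK$i"
  eval DISK"${i}"="vm-${VMID}-disk-${i}${DISK_EXT}"     # defines DISK0, DISK1
  eval DISK"${i}"_REF="${STORAGE}:${DISK_REF}${!disk}"  # ${!disk} reads $DISK0 / $DISK1
done
echo "$DISK0_REF"  # local-lvm:vm-100-disk-0
echo "$DISK1_REF"  # local-lvm:vm-100-disk-1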

View File

@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -201,17 +201,6 @@ function exit-script() {
   exit
 }
-function select_cloud_init() {
-  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \
-    --yesno "Enable Cloud-Init for VM configuration?\n\nCloud-Init allows automatic configuration of:\n- User accounts and passwords\n- SSH keys\n- Network settings (DHCP/Static)\n- DNS configuration\n\nYou can also configure these settings later in Proxmox UI.\n\nNote: Without Cloud-Init, the nocloud image will be used with console auto-login." --defaultno 18 68); then
-    CLOUD_INIT="yes"
-    echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}yes${CL}"
-  else
-    CLOUD_INIT="no"
-    echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}no${CL}"
-  fi
-}
 function default_settings() {
   VMID=$(get_valid_nextid)
   FORMAT=",efitype=4m"
@@ -227,6 +216,7 @@ function default_settings() {
VLAN=""
MTU=""
START_VM="yes"
CLOUD_INIT="no"
METHOD="default"
echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
@@ -240,7 +230,7 @@ function default_settings() {
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
select_cloud_init
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
echo -e "${CREATING}${BOLD}${DGN}Creating a Debian 13 VM using the above default settings${CL}"
}
@@ -410,7 +400,13 @@ function advanced_settings() {
     exit-script
   fi
-  select_cloud_init
+  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" --yesno "Configure the VM with Cloud-init?" --defaultno 10 58); then
+    echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}yes${CL}"
+    CLOUD_INIT="yes"
+  else
+    echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
+    CLOUD_INIT="no"
+  fi
   if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then
     echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
@@ -477,17 +473,6 @@ else
 fi
 msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
 msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
-# ==============================================================================
-# PREREQUISITES
-# ==============================================================================
-if ! command -v virt-customize &>/dev/null; then
-  msg_info "Installing libguestfs-tools"
-  apt-get update >/dev/null 2>&1
-  apt-get install -y libguestfs-tools >/dev/null 2>&1
-  msg_ok "Installed libguestfs-tools"
-fi
 msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
 if [ "$CLOUD_INIT" == "yes" ]; then
   URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
@@ -501,50 +486,6 @@ echo -en "\e[1A\e[0K"
 FILE=$(basename $URL)
 msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
-# ==============================================================================
-# IMAGE CUSTOMIZATION
-# ==============================================================================
-msg_info "Customizing ${FILE} image"
-WORK_FILE=$(mktemp --suffix=.qcow2)
-cp "$FILE" "$WORK_FILE"
-# Set hostname
-virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
-# Prepare for unique machine-id on first boot
-virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
-virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
-# Disable systemd-firstboot to prevent interactive prompts blocking the console
-virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true
-# Pre-seed firstboot settings so it won't prompt even if triggered
-virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
-virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true
-if [ "$CLOUD_INIT" == "yes" ]; then
-  # Cloud-Init handles SSH and login
-  virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
-  virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
-else
-  # Configure auto-login on serial console (ttyS0) and virtual console (tty1)
-  virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
-  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
-[Service]
-ExecStart=
-ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
-EOF' >/dev/null 2>&1 || true
-  virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
-  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
-[Service]
-ExecStart=
-ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
-EOF' >/dev/null 2>&1 || true
-fi
-msg_ok "Customized image"
 STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
 case $STORAGE_TYPE in
 nfs | dir)
@@ -560,11 +501,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"
@@ -576,7 +512,7 @@ msg_info "Creating a Debian 13 VM"
 qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
   -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
 pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
-qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
+qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
 if [ "$CLOUD_INIT" == "yes" ]; then
   qm set $VMID \
     -efidisk0 ${DISK0_REF}${FORMAT} \
@@ -591,10 +527,6 @@ else
     -boot order=scsi0 \
     -serial0 socket >/dev/null
 fi
-# Clean up work file
-rm -f "$WORK_FILE"
 DESCRIPTION=$(
   cat <<EOF
 <div align='center'>
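
One design note on the customization block removed above: each virt-customize call boots a fresh libguestfs appliance, so the one-option-per-invocation style pays that startup cost repeatedly. virt-customize applies its options in order, so the same work could be batched. A sketch only, reusing $WORK_FILE and $HN from that block:

# Sketch (reuses $WORK_FILE/$HN from the removed block): batch the tweaks
# into a single appliance boot; options are applied left to right.
virt-customize -q -a "$WORK_FILE" \
  --hostname "$HN" \
  --run-command "truncate -s 0 /etc/machine-id" \
  --run-command "rm -f /var/lib/dbus/machine-id" \
  --timezone "Etc/UTC"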

View File

@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -501,11 +501,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -45,7 +45,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }

View File

@@ -74,7 +74,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }

View File

@@ -71,7 +71,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -566,11 +566,6 @@ zfspool)
DISK_REF=""
DISK_IMPORT="-format raw"
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}"
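
The reason these case blocks juggle DISK_EXT and DISK_REF at all: block-backed storages (lvmthin, zfspool) reference volumes by bare name, while file-backed storages (dir, nfs, cifs) nest a file with an extension under a per-VM directory. A sketch with invented storage names:

# Sketch (storage names invented): how DISK_EXT/DISK_REF compose the volume
# reference that pvesm and qm expect for each storage family.
VMID=100
for layout in "local-lvm::" "local:.qcow2:${VMID}/"; do
  IFS=: read -r STORAGE DISK_EXT DISK_REF <<<"$layout"
  echo "${STORAGE}:${DISK_REF}vm-${VMID}-disk-0${DISK_EXT}"
done
# -> local-lvm:vm-100-disk-0        (block-backed: lvmthin, zfspool)
# -> local:100/vm-100-disk-0.qcow2  (file-backed: dir, nfs, cifs)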

View File

@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -487,11 +487,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1,2}; do
disk="DISK$i"

View File

@@ -74,7 +74,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid

View File

@@ -48,7 +48,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -619,11 +619,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -71,7 +71,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -500,11 +500,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1,2}; do
disk="DISK$i"

View File

@@ -79,7 +79,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -402,11 +402,6 @@ nfs | dir)
DISK_REF="$VMID/"
DISK_IMPORT="-format qcow2"
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -66,7 +66,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -482,11 +482,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -69,7 +69,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -484,11 +484,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -68,7 +68,7 @@ function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
post_update_to_api "failed" "$exit_code"
post_update_to_api "failed" "$command"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
cleanup_vmid
@@ -483,11 +483,6 @@ btrfs)
FORMAT=",efitype=4m"
THIN=""
;;
*)
DISK_EXT=""
DISK_REF=""
DISK_IMPORT="-format raw"
;;
esac
for i in {0,1}; do
disk="DISK$i"

View File

@@ -69,7 +69,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${exit_code}"
+  post_update_to_api "failed" "${command}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
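
A closing observation on the change repeated across these handlers: reporting the failing command instead of the bare exit code means the reported value can contain quotes and backslashes, so whatever post_update_to_api sends upstream has to escape it. A hypothetical sketch (payload shape and helper assumed, not taken from this diff):

# Hypothetical sketch: escape a command string before embedding it in JSON.
json_escape() {
  local s=$1
  s=${s//\\/\\\\}  # escape backslashes first
  s=${s//\"/\\\"}  # then double quotes
  printf '%s' "$s"
}
payload="{\"status\":\"failed\",\"error\":\"$(json_escape 'echo "hello \world"')\"}"
echo "$payload"  # {"status":"failed","error":"echo \"hello \\world\""}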