mirror of https://github.com/community-scripts/ProxmoxVE.git
synced 2026-02-13 16:53:27 +01:00

Compare commits: github-act ... fix/debian (2 commits)

Commits: acd5a97a63, b2d4660db4
CHANGELOG.md (73 lines changed)
@@ -401,74 +401,6 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
 </details>

-## 2026-02-13
-
-### 🚀 Updated Scripts
-
-- #### 🐞 Bug Fixes
-
-  - OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874))
-  - Planka: add migrate step to update function [@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877))
-  - Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868))
-  - [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864))
-
-- #### 🔧 Refactor
-
-  - chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872))
-  - Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850))
-
-### 💾 Core
-
-- #### 🔧 Refactor
-
-  - core: retry reporting with fallback payloads [@MickLesk](https://github.com/MickLesk) ([#11885](https://github.com/community-scripts/ProxmoxVE/pull/11885))
-
-### 📡 API
-
-- #### ✨ New Features
-
-  - error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875))
-
-### 🌐 Website
-
-- #### 📝 Script Information
-
-  - SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829))
-
-## 2026-02-12
-
-### 🚀 Updated Scripts
-
-- #### 🐞 Bug Fixes
-
-  - EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844))
-  - Increased the Grafana container default disk size. [@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840))
-  - Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
-  - Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833))
-  - Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831))
-
-- #### ✨ New Features
-
-  - Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842))
-  - Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810))
-
-### 💾 Core
-
-- #### ✨ New Features
-
-  - tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841))
-  - core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822))
-
-- #### 🔧 Refactor
-
-  - error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845))
-
-### 🧰 Tools
-
-- #### 🐞 Bug Fixes
-
-  - Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837))
-
 ## 2026-02-11

 ### 🆕 New Scripts
@@ -479,16 +411,11 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
 - #### 🐞 Bug Fixes

-  - dispatcharr: include port 9191 in success-message [@MickLesk](https://github.com/MickLesk) ([#11808](https://github.com/community-scripts/ProxmoxVE/pull/11808))
   - fix: make donetick 0.1.71 compatible [@tomfrenzel](https://github.com/tomfrenzel) ([#11804](https://github.com/community-scripts/ProxmoxVE/pull/11804))
   - Kasm: Support new version URL format without hash suffix [@MickLesk](https://github.com/MickLesk) ([#11787](https://github.com/community-scripts/ProxmoxVE/pull/11787))
   - LibreTranslate: Remove Torch [@tremor021](https://github.com/tremor021) ([#11783](https://github.com/community-scripts/ProxmoxVE/pull/11783))
   - Snowshare: fix update script [@TuroYT](https://github.com/TuroYT) ([#11726](https://github.com/community-scripts/ProxmoxVE/pull/11726))
-
-- #### ✨ New Features
-
-  - [Feature] OpenCloud: support PosixFS Collaborative Mode [@vhsdream](https://github.com/vhsdream) ([#11806](https://github.com/community-scripts/ProxmoxVE/pull/11806))

 ### 💾 Core

 - #### 🔧 Refactor
api/.env.example (new file, 5 lines)
@@ -0,0 +1,5 @@
MONGO_USER=
MONGO_PASSWORD=
MONGO_IP=
MONGO_PORT=
MONGO_DATABASE=
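For local testing, a filled-in sketch of these variables — all values below are placeholders, not taken from the repo; api/main.go (further down) assembles them into a mongodb:// URI:

    MONGO_USER=apiuser
    MONGO_PASSWORD=changeme
    MONGO_IP=127.0.0.1
    MONGO_PORT=27017
    MONGO_DATABASE=proxmoxve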
api/go.mod (new file, 23 lines)
@@ -0,0 +1,23 @@
module proxmox-api

go 1.24.0

require (
    github.com/gorilla/mux v1.8.1
    github.com/joho/godotenv v1.5.1
    github.com/rs/cors v1.11.1
    go.mongodb.org/mongo-driver v1.17.2
)

require (
    github.com/golang/snappy v0.0.4 // indirect
    github.com/klauspost/compress v1.16.7 // indirect
    github.com/montanaflynn/stats v0.7.1 // indirect
    github.com/xdg-go/pbkdf2 v1.0.0 // indirect
    github.com/xdg-go/scram v1.1.2 // indirect
    github.com/xdg-go/stringprep v1.0.4 // indirect
    github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
    golang.org/x/crypto v0.45.0 // indirect
    golang.org/x/sync v0.18.0 // indirect
    golang.org/x/text v0.31.0 // indirect
)
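With the module file above, building the API is the standard Go workflow — a sketch, not part of the diff; it assumes Go 1.24+ on PATH, a populated api/.env, and a reachable MongoDB:

    cd api
    go mod download            # fetch gorilla/mux, godotenv, rs/cors, mongo-driver
    go build -o proxmox-api .  # compile the single main package
    ./proxmox-api              # reads .env via godotenv, serves on :8080 (see main.go below)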
api/go.sum (new file, 56 lines)
@@ -0,0 +1,56 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
api/main.go (new file, 450 lines)
@@ -0,0 +1,450 @@
// Copyright (c) 2021-2026 community-scripts ORG
// Author: Michel Roegl-Brunner (michelroegl-brunner)
// License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "net/http"
    "os"
    "strconv"
    "time"

    "github.com/gorilla/mux"
    "github.com/joho/godotenv"
    "github.com/rs/cors"
    "go.mongodb.org/mongo-driver/bson"
    "go.mongodb.org/mongo-driver/bson/primitive"
    "go.mongodb.org/mongo-driver/mongo"
    "go.mongodb.org/mongo-driver/mongo/options"
)

var client *mongo.Client
var collection *mongo.Collection

func loadEnv() {
    if err := godotenv.Load(); err != nil {
        log.Fatal("Error loading .env file")
    }
}

// DataModel represents a single document in MongoDB
type DataModel struct {
    ID         primitive.ObjectID `json:"id" bson:"_id,omitempty"`
    CT_TYPE    uint               `json:"ct_type" bson:"ct_type"`
    DISK_SIZE  float32            `json:"disk_size" bson:"disk_size"`
    CORE_COUNT uint               `json:"core_count" bson:"core_count"`
    RAM_SIZE   uint               `json:"ram_size" bson:"ram_size"`
    OS_TYPE    string             `json:"os_type" bson:"os_type"`
    OS_VERSION string             `json:"os_version" bson:"os_version"`
    DISABLEIP6 string             `json:"disableip6" bson:"disableip6"`
    NSAPP      string             `json:"nsapp" bson:"nsapp"`
    METHOD     string             `json:"method" bson:"method"`
    CreatedAt  time.Time          `json:"created_at" bson:"created_at"`
    PVEVERSION string             `json:"pve_version" bson:"pve_version"`
    STATUS     string             `json:"status" bson:"status"`
    RANDOM_ID  string             `json:"random_id" bson:"random_id"`
    TYPE       string             `json:"type" bson:"type"`
    ERROR      string             `json:"error" bson:"error"`
}

type StatusModel struct {
    RANDOM_ID string `json:"random_id" bson:"random_id"`
    ERROR     string `json:"error" bson:"error"`
    STATUS    string `json:"status" bson:"status"`
}

type CountResponse struct {
    TotalEntries int64            `json:"total_entries"`
    StatusCount  map[string]int64 `json:"status_count"`
    NSAPPCount   map[string]int64 `json:"nsapp_count"`
}

// ConnectDatabase initializes the MongoDB connection
func ConnectDatabase() {
    loadEnv()

    mongoURI := fmt.Sprintf("mongodb://%s:%s@%s:%s",
        os.Getenv("MONGO_USER"),
        os.Getenv("MONGO_PASSWORD"),
        os.Getenv("MONGO_IP"),
        os.Getenv("MONGO_PORT"))

    database := os.Getenv("MONGO_DATABASE")
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    var err error
    client, err = mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
    if err != nil {
        log.Fatal("Failed to connect to MongoDB!", err)
    }
    collection = client.Database(database).Collection("data_models")
    fmt.Println("Connected to MongoDB on 10.10.10.18")
}

// UploadJSON handles API requests and stores data as a document in MongoDB
func UploadJSON(w http.ResponseWriter, r *http.Request) {
    var input DataModel

    if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    input.CreatedAt = time.Now()

    _, err := collection.InsertOne(context.Background(), input)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    log.Println("Received data:", input)
    w.WriteHeader(http.StatusCreated)
    json.NewEncoder(w).Encode(map[string]string{"message": "Data saved successfully"})
}

// UpdateStatus updates the status of a record based on RANDOM_ID
func UpdateStatus(w http.ResponseWriter, r *http.Request) {
    var input StatusModel

    if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    filter := bson.M{"random_id": input.RANDOM_ID}
    update := bson.M{"$set": bson.M{"status": input.STATUS, "error": input.ERROR}}

    _, err := collection.UpdateOne(context.Background(), filter, update)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    log.Println("Updated data:", input)
    w.WriteHeader(http.StatusOK)
    json.NewEncoder(w).Encode(map[string]string{"message": "Record updated successfully"})
}

// GetDataJSON fetches all data from MongoDB
func GetDataJSON(w http.ResponseWriter, r *http.Request) {
    var records []DataModel
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    cursor, err := collection.Find(ctx, bson.M{})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        records = append(records, record)
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(records)
}

func GetPaginatedData(w http.ResponseWriter, r *http.Request) {
    page, _ := strconv.Atoi(r.URL.Query().Get("page"))
    limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
    if page < 1 {
        page = 1
    }
    if limit < 1 {
        limit = 10
    }
    skip := (page - 1) * limit
    var records []DataModel
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    options := options.Find().SetSkip(int64(skip)).SetLimit(int64(limit))
    cursor, err := collection.Find(ctx, bson.M{}, options)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        records = append(records, record)
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(records)
}

func GetSummary(w http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    totalCount, err := collection.CountDocuments(ctx, bson.M{})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    statusCount := make(map[string]int64)
    nsappCount := make(map[string]int64)

    pipeline := []bson.M{
        {"$group": bson.M{"_id": "$status", "count": bson.M{"$sum": 1}}},
    }
    cursor, err := collection.Aggregate(ctx, pipeline)
    if err == nil {
        for cursor.Next(ctx) {
            var result struct {
                ID    string `bson:"_id"`
                Count int64  `bson:"count"`
            }
            if err := cursor.Decode(&result); err == nil {
                statusCount[result.ID] = result.Count
            }
        }
    }

    pipeline = []bson.M{
        {"$group": bson.M{"_id": "$nsapp", "count": bson.M{"$sum": 1}}},
    }
    cursor, err = collection.Aggregate(ctx, pipeline)
    if err == nil {
        for cursor.Next(ctx) {
            var result struct {
                ID    string `bson:"_id"`
                Count int64  `bson:"count"`
            }
            if err := cursor.Decode(&result); err == nil {
                nsappCount[result.ID] = result.Count
            }
        }
    }

    response := CountResponse{
        TotalEntries: totalCount,
        StatusCount:  statusCount,
        NSAPPCount:   nsappCount,
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func GetByNsapp(w http.ResponseWriter, r *http.Request) {
    nsapp := r.URL.Query().Get("nsapp")
    var records []DataModel
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    cursor, err := collection.Find(ctx, bson.M{"nsapp": nsapp})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        records = append(records, record)
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(records)
}

func GetByDateRange(w http.ResponseWriter, r *http.Request) {

    startDate := r.URL.Query().Get("start_date")
    endDate := r.URL.Query().Get("end_date")

    if startDate == "" || endDate == "" {
        http.Error(w, "Both start_date and end_date are required", http.StatusBadRequest)
        return
    }

    start, err := time.Parse("2006-01-02T15:04:05.999999+00:00", startDate+"T00:00:00+00:00")
    if err != nil {
        http.Error(w, "Invalid start_date format", http.StatusBadRequest)
        return
    }

    end, err := time.Parse("2006-01-02T15:04:05.999999+00:00", endDate+"T23:59:59+00:00")
    if err != nil {
        http.Error(w, "Invalid end_date format", http.StatusBadRequest)
        return
    }

    var records []DataModel
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    cursor, err := collection.Find(ctx, bson.M{
        "created_at": bson.M{
            "$gte": start,
            "$lte": end,
        },
    })
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        records = append(records, record)
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(records)
}

func GetByStatus(w http.ResponseWriter, r *http.Request) {
    status := r.URL.Query().Get("status")
    var records []DataModel
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    cursor, err := collection.Find(ctx, bson.M{"status": status})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        records = append(records, record)
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(records)
}

func GetByOS(w http.ResponseWriter, r *http.Request) {
    osType := r.URL.Query().Get("os_type")
    osVersion := r.URL.Query().Get("os_version")
    var records []DataModel
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    cursor, err := collection.Find(ctx, bson.M{"os_type": osType, "os_version": osVersion})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        records = append(records, record)
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(records)
}

func GetErrors(w http.ResponseWriter, r *http.Request) {
    errorCount := make(map[string]int)

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    cursor, err := collection.Find(ctx, bson.M{"error": bson.M{"$ne": ""}})
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer cursor.Close(ctx)

    for cursor.Next(ctx) {
        var record DataModel
        if err := cursor.Decode(&record); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }

        if record.ERROR != "" {
            errorCount[record.ERROR]++
        }
    }

    type ErrorCountResponse struct {
        Error string `json:"error"`
        Count int    `json:"count"`
    }

    var errorCounts []ErrorCountResponse
    for err, count := range errorCount {
        errorCounts = append(errorCounts, ErrorCountResponse{
            Error: err,
            Count: count,
        })
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(struct {
        ErrorCounts []ErrorCountResponse `json:"error_counts"`
    }{
        ErrorCounts: errorCounts,
    })
}

func main() {
    ConnectDatabase()

    router := mux.NewRouter()
    router.HandleFunc("/upload", UploadJSON).Methods("POST")
    router.HandleFunc("/upload/updatestatus", UpdateStatus).Methods("POST")
    router.HandleFunc("/data/json", GetDataJSON).Methods("GET")
    router.HandleFunc("/data/paginated", GetPaginatedData).Methods("GET")
    router.HandleFunc("/data/summary", GetSummary).Methods("GET")
    router.HandleFunc("/data/nsapp", GetByNsapp).Methods("GET")
    router.HandleFunc("/data/date", GetByDateRange).Methods("GET")
    router.HandleFunc("/data/status", GetByStatus).Methods("GET")
    router.HandleFunc("/data/os", GetByOS).Methods("GET")
    router.HandleFunc("/data/errors", GetErrors).Methods("GET")

    c := cors.New(cors.Options{
        AllowedOrigins:   []string{"*"},
        AllowedMethods:   []string{"GET", "POST"},
        AllowedHeaders:   []string{"Content-Type", "Authorization"},
        AllowCredentials: true,
    })

    handler := c.Handler(router)

    fmt.Println("Server running on port 8080")
    log.Fatal(http.ListenAndServe(":8080", handler))
}
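For a quick smoke test of the routes registered in main() — a sketch, not part of the diff; it assumes the API is running locally on its default port 8080 with a reachable MongoDB, and all payload values are illustrative:

    # POST a new record (field names match DataModel's json tags)
    curl -X POST http://localhost:8080/upload \
      -H 'Content-Type: application/json' \
      -d '{"ct_type":1,"disk_size":4,"core_count":1,"ram_size":512,"os_type":"debian","os_version":"13","nsapp":"grafana","method":"install","pve_version":"8.3","status":"installing","random_id":"abc123","type":"lxc"}'

    # Mark that record done (UpdateStatus filters on random_id)
    curl -X POST http://localhost:8080/upload/updatestatus \
      -H 'Content-Type: application/json' \
      -d '{"random_id":"abc123","status":"done","error":""}'

    # Read endpoints
    curl 'http://localhost:8080/data/paginated?page=1&limit=10'
    curl 'http://localhost:8080/data/summary'
    curl 'http://localhost:8080/data/date?start_date=2026-02-01&end_date=2026-02-13'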
@@ -9,7 +9,7 @@ APP="Alpine-Grafana"
|
|||||||
var_tags="${var_tags:-alpine;monitoring}"
|
var_tags="${var_tags:-alpine;monitoring}"
|
||||||
var_cpu="${var_cpu:-1}"
|
var_cpu="${var_cpu:-1}"
|
||||||
var_ram="${var_ram:-256}"
|
var_ram="${var_ram:-256}"
|
||||||
var_disk="${var_disk:-2}"
|
var_disk="${var_disk:-1}"
|
||||||
var_os="${var_os:-alpine}"
|
var_os="${var_os:-alpine}"
|
||||||
var_version="${var_version:-3.23}"
|
var_version="${var_version:-3.23}"
|
||||||
var_unprivileged="${var_unprivileged:-1}"
|
var_unprivileged="${var_unprivileged:-1}"
|
||||||
|
|||||||
@@ -28,7 +28,6 @@ function update_script() {
     exit
   fi
   msg_info "Updating Deluge"
-  ensure_dependencies python3-setuptools
   $STD apt update
   $STD pip3 install deluge[all] --upgrade
   msg_ok "Updated Deluge"
@@ -104,7 +104,7 @@ function update_script() {
 cd /opt/dispatcharr
 rm -rf .venv
 $STD uv venv --clear
-$STD uv sync
+$STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
 $STD uv pip install gunicorn gevent celery redis daphne
 msg_ok "Updated Dispatcharr Backend"

@@ -144,4 +144,4 @@ description
 msg_ok "Completed successfully!\n"
 echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
 echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9191${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
@@ -42,8 +42,7 @@ function update_script() {

 msg_info "Restoring Configurations"
 mv /opt/selfhosted.yaml /opt/donetick/config
-grep -q 'http://localhost"$' /opt/donetick/config/selfhosted.yaml || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' /opt/donetick/config/selfhosted.yaml
-grep -q 'capacitor://localhost' /opt/donetick/config/selfhosted.yaml || sed -i '/http:\/\/localhost"$/a\ - "capacitor://localhost"' /opt/donetick/config/selfhosted.yaml
+sed -i '/capacitor:\/\/localhost/d' /opt/donetick/config/selfhosted.yaml
 mv /opt/donetick.db /opt/donetick
 msg_ok "Restored Configurations"
@@ -9,7 +9,7 @@ APP="EMQX"
|
|||||||
var_tags="${var_tags:-mqtt}"
|
var_tags="${var_tags:-mqtt}"
|
||||||
var_cpu="${var_cpu:-2}"
|
var_cpu="${var_cpu:-2}"
|
||||||
var_ram="${var_ram:-1024}"
|
var_ram="${var_ram:-1024}"
|
||||||
var_disk="${var_disk:-6}"
|
var_disk="${var_disk:-4}"
|
||||||
var_os="${var_os:-debian}"
|
var_os="${var_os:-debian}"
|
||||||
var_version="${var_version:-13}"
|
var_version="${var_version:-13}"
|
||||||
var_unprivileged="${var_unprivileged:-1}"
|
var_unprivileged="${var_unprivileged:-1}"
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ APP="Grafana"
|
|||||||
var_tags="${var_tags:-monitoring;visualization}"
|
var_tags="${var_tags:-monitoring;visualization}"
|
||||||
var_cpu="${var_cpu:-1}"
|
var_cpu="${var_cpu:-1}"
|
||||||
var_ram="${var_ram:-512}"
|
var_ram="${var_ram:-512}"
|
||||||
var_disk="${var_disk:-4}"
|
var_disk="${var_disk:-2}"
|
||||||
var_os="${var_os:-debian}"
|
var_os="${var_os:-debian}"
|
||||||
var_version="${var_version:-13}"
|
var_version="${var_version:-13}"
|
||||||
var_unprivileged="${var_unprivileged:-1}"
|
var_unprivileged="${var_unprivileged:-1}"
|
||||||
|
|||||||
@@ -46,7 +46,7 @@ function update_script() {
 msg_info "Restoring configuration & data"
 mv /opt/app.env /opt/jotty/.env
 [[ -d /opt/data ]] && mv /opt/data /opt/jotty/data
-[[ -d /opt/jotty/config ]] && cp -a /opt/config/* /opt/jotty/config && rm -rf /opt/config
+[[ -d /opt/jotty/config ]] && mv /opt/config/* /opt/jotty/config
 msg_ok "Restored configuration & data"

 msg_info "Starting Service"
@@ -30,7 +30,7 @@ function update_script() {
 fi

 RELEASE="v5.0.2"
-if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}"; then
+if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then
 msg_info "Stopping services"
 systemctl stop opencloud opencloud-wopi
 msg_ok "Stopped services"
@@ -38,21 +38,9 @@ function update_script() {
 msg_info "Updating packages"
 $STD apt-get update
 $STD apt-get dist-upgrade -y
-ensure_dependencies "inotify-tools"
 msg_ok "Updated packages"

-CLEAN_INSTALL=1 fetch_and_deploy_gh_release "OpenCloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"
-
-if ! grep -q 'POSIX_WATCH' /etc/opencloud/opencloud.env; then
-  sed -i '/^## External/i ## Uncomment below to enable PosixFS Collaborative Mode\
-## Increase inotify watch/instance limits on your PVE host:\
-### sysctl -w fs.inotify.max_user_watches=1048576\
-### sysctl -w fs.inotify.max_user_instances=1024\
-# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true\
-# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait\
-# STORAGE_USERS_POSIX_WATCH_FS=true\
-# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>' /etc/opencloud/opencloud.env
-fi
+CLEAN_INSTALL=1 fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"

 msg_info "Starting services"
 systemctl start opencloud opencloud-wopi
@@ -44,7 +44,7 @@ function update_script() {

 msg_info "Installing uv-based Open-WebUI"
 PYTHON_VERSION="3.12" setup_uv
-$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
+$STD uv tool install --python 3.12 open-webui[all]
 msg_ok "Installed uv-based Open-WebUI"

 msg_info "Restoring data"
@@ -126,7 +126,7 @@ EOF

 msg_info "Updating Open WebUI via uv"
 PYTHON_VERSION="3.12" setup_uv
-$STD uv tool install --force --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
+$STD uv tool upgrade --python 3.12 open-webui[all]
 systemctl restart open-webui
 msg_ok "Updated Open WebUI"
 msg_ok "Updated successfully!"
@@ -51,7 +51,7 @@ function update_script() {
 $STD npm run db:generate
 $STD npm run build
 $STD npm run build:cli
-$STD npm run db:push
+$STD npm run db:sqlite:push
 cp -R .next/standalone ./
 chmod +x ./dist/cli.mjs
 cp server/db/names.json ./dist/names.json
@@ -61,12 +61,6 @@ function update_script() {
 rm -rf "$BK"
 msg_ok "Restored data"

-msg_ok "Migrate Database"
-cd /opt/planka
-$STD npm run db:upgrade
-$STD npm run db:migrate
-msg_ok "Migrated Database"
-
 msg_info "Starting Service"
 systemctl start planka
 msg_ok "Started Service"
@@ -28,55 +28,16 @@ function update_script() {
   exit
 fi

-if check_for_gh_release "Radicale" "Kozea/Radicale"; then
-  msg_info "Stopping service"
-  systemctl stop radicale
-  msg_ok "Stopped service"
-
-  msg_info "Backing up users file"
-  cp /opt/radicale/users /opt/radicale_users_backup
-  msg_ok "Backed up users file"
-
-  PYTHON_VERSION="3.13" setup_uv
-  CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
-
-  msg_info "Restoring users file"
-  rm -f /opt/radicale/users
-  mv /opt/radicale_users_backup /opt/radicale/users
-  msg_ok "Restored users file"
-
-  if grep -q 'start.sh' /etc/systemd/system/radicale.service; then
-    sed -i -e '/^Description/i[Unit]' \
-      -e '\|^ExecStart|iWorkingDirectory=/opt/radicale' \
-      -e 's|^ExecStart=.*|ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config|' /etc/systemd/system/radicale.service
-    systemctl daemon-reload
-  fi
-  if [[ ! -f /etc/radicale/config ]]; then
-    msg_info "Migrating to config file (/etc/radicale/config)"
-    mkdir -p /etc/radicale
-    cat <<EOF >/etc/radicale/config
-[server]
-hosts = 0.0.0.0:5232
-
-[auth]
-type = htpasswd
-htpasswd_filename = /opt/radicale/users
-htpasswd_encryption = sha512
-
-[storage]
-type = multifilesystem
-filesystem_folder = /var/lib/radicale/collections
-
-[web]
-type = internal
-EOF
-    msg_ok "Migrated to config (/etc/radicale/config)"
-  fi
-  msg_info "Starting service"
-  systemctl start radicale
-  msg_ok "Started service"
-  msg_ok "Updated Successfully!"
-fi
+msg_info "Updating ${APP}"
+$STD python3 -m venv /opt/radicale
+source /opt/radicale/bin/activate
+$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
+msg_ok "Updated ${APP}"
+
+msg_info "Starting Service"
+systemctl enable -q --now radicale
+msg_ok "Started Service"
+msg_ok "Updated successfully!"
 exit
 }

@@ -21,7 +21,7 @@
   "resources": {
     "cpu": 2,
     "ram": 1024,
-    "hdd": 6,
+    "hdd": 4,
     "os": "debian",
     "version": "13"
   }
@@ -1,5 +1,5 @@
 {
-  "generated": "2026-02-13T12:11:36Z",
+  "generated": "2026-02-11T18:22:25Z",
   "versions": [
     {
       "slug": "2fauth",
@@ -193,9 +193,9 @@
     {
       "slug": "cleanuparr",
       "repo": "Cleanuparr/Cleanuparr",
-      "version": "v2.6.1",
+      "version": "v2.5.1",
       "pinned": false,
-      "date": "2026-02-13T10:00:19Z"
+      "date": "2026-01-11T00:46:17Z"
     },
     {
       "slug": "cloudreve",
@@ -298,9 +298,9 @@
     {
       "slug": "donetick",
       "repo": "donetick/donetick",
-      "version": "v0.1.73",
+      "version": "v0.1.71",
       "pinned": false,
-      "date": "2026-02-12T23:42:30Z"
+      "date": "2026-02-11T06:01:13Z"
     },
     {
       "slug": "drawio",
@@ -403,9 +403,9 @@
     {
       "slug": "ghostfolio",
      "repo": "ghostfolio/ghostfolio",
-      "version": "2.238.0",
+      "version": "2.237.0",
       "pinned": false,
-      "date": "2026-02-12T18:28:55Z"
+      "date": "2026-02-08T13:59:53Z"
     },
     {
       "slug": "gitea",
@@ -543,9 +543,9 @@
     {
       "slug": "huntarr",
       "repo": "plexguide/Huntarr.io",
-      "version": "9.2.4.1",
+      "version": "9.2.3",
       "pinned": false,
-      "date": "2026-02-12T22:17:47Z"
+      "date": "2026-02-07T04:44:20Z"
     },
     {
       "slug": "immich-public-proxy",
@@ -571,16 +571,16 @@
     {
       "slug": "invoiceninja",
       "repo": "invoiceninja/invoiceninja",
-      "version": "v5.12.59",
+      "version": "v5.12.55",
       "pinned": false,
-      "date": "2026-02-13T02:26:13Z"
+      "date": "2026-02-05T01:06:15Z"
     },
     {
       "slug": "jackett",
       "repo": "Jackett/Jackett",
-      "version": "v0.24.1103",
+      "version": "v0.24.1094",
       "pinned": false,
-      "date": "2026-02-13T05:53:23Z"
+      "date": "2026-02-11T06:01:16Z"
     },
     {
       "slug": "jellystat",
@@ -599,9 +599,9 @@
     {
       "slug": "jotty",
       "repo": "fccview/jotty",
-      "version": "1.20.0",
+      "version": "1.19.1",
       "pinned": false,
-      "date": "2026-02-12T09:23:30Z"
+      "date": "2026-01-26T21:30:39Z"
     },
     {
       "slug": "kapowarr",
@@ -823,9 +823,9 @@
     {
       "slug": "metube",
       "repo": "alexta69/metube",
-      "version": "2026.02.12",
+      "version": "2026.02.08",
       "pinned": false,
-      "date": "2026-02-12T21:05:49Z"
+      "date": "2026-02-08T17:01:37Z"
     },
     {
       "slug": "miniflux",
@@ -998,9 +998,9 @@
     {
       "slug": "pangolin",
       "repo": "fosrl/pangolin",
-      "version": "1.15.4",
+      "version": "1.15.2",
       "pinned": false,
-      "date": "2026-02-13T00:54:02Z"
+      "date": "2026-02-05T19:23:58Z"
     },
     {
       "slug": "paperless-ai",
@@ -1026,9 +1026,9 @@
     {
       "slug": "patchmon",
       "repo": "PatchMon/PatchMon",
-      "version": "v1.4.0",
+      "version": "v1.3.7",
       "pinned": false,
-      "date": "2026-02-13T10:39:03Z"
+      "date": "2025-12-25T11:08:14Z"
     },
     {
       "slug": "paymenter",
@@ -1131,9 +1131,9 @@
     {
       "slug": "prometheus-alertmanager",
       "repo": "prometheus/alertmanager",
-      "version": "v0.31.1",
+      "version": "v0.31.0",
       "pinned": false,
-      "date": "2026-02-11T21:28:26Z"
+      "date": "2026-02-02T13:34:15Z"
     },
     {
       "slug": "prometheus-blackbox-exporter",
@@ -1219,13 +1219,6 @@
       "pinned": false,
       "date": "2025-11-16T22:39:01Z"
     },
-    {
-      "slug": "radicale",
-      "repo": "Kozea/Radicale",
-      "version": "v3.6.0",
-      "pinned": false,
-      "date": "2026-01-10T06:56:46Z"
-    },
     {
       "slug": "rclone",
       "repo": "rclone/rclone",
@@ -1236,9 +1229,9 @@
     {
       "slug": "rdtclient",
       "repo": "rogerfar/rdt-client",
-      "version": "v2.0.120",
+      "version": "v2.0.119",
       "pinned": false,
-      "date": "2026-02-12T02:53:51Z"
+      "date": "2025-10-13T23:15:11Z"
     },
     {
       "slug": "reactive-resume",
@@ -1299,9 +1292,9 @@
     {
       "slug": "scraparr",
       "repo": "thecfu/scraparr",
-      "version": "v3.0.3",
+      "version": "v3.0.1",
       "pinned": false,
-      "date": "2026-02-12T14:20:56Z"
+      "date": "2026-02-11T17:42:23Z"
     },
     {
       "slug": "seelf",
@@ -1390,9 +1383,9 @@
     {
       "slug": "stirling-pdf",
       "repo": "Stirling-Tools/Stirling-PDF",
-      "version": "v2.4.6",
+      "version": "v2.4.5",
       "pinned": false,
-      "date": "2026-02-12T00:01:19Z"
+      "date": "2026-02-06T23:12:20Z"
     },
     {
       "slug": "streamlink-webui",
@@ -1439,9 +1432,9 @@
     {
       "slug": "termix",
       "repo": "Termix-SSH/Termix",
-      "version": "release-1.11.1-tag",
+      "version": "release-1.11.0-tag",
       "pinned": false,
-      "date": "2026-02-13T04:49:16Z"
+      "date": "2026-01-25T02:09:52Z"
     },
     {
       "slug": "the-lounge",
@@ -1467,9 +1460,9 @@
     {
       "slug": "tianji",
       "repo": "msgbyte/tianji",
-      "version": "v1.31.12",
+      "version": "v1.31.10",
       "pinned": false,
-      "date": "2026-02-12T19:06:14Z"
+      "date": "2026-02-04T17:21:04Z"
     },
     {
       "slug": "traccar",
@@ -1558,9 +1551,9 @@
     {
       "slug": "upsnap",
       "repo": "seriousm4x/UpSnap",
-      "version": "5.2.8",
+      "version": "5.2.7",
       "pinned": false,
-      "date": "2026-02-13T00:02:37Z"
+      "date": "2026-01-07T23:48:00Z"
     },
     {
       "slug": "uptimekuma",
@@ -1663,9 +1656,9 @@
     {
       "slug": "wikijs",
       "repo": "requarks/wiki",
-      "version": "v2.5.312",
+      "version": "v2.5.311",
       "pinned": false,
-      "date": "2026-02-12T02:45:22Z"
+      "date": "2026-01-08T09:50:00Z"
     },
     {
       "slug": "wishlist",
@@ -21,7 +21,7 @@
   "resources": {
     "cpu": 1,
     "ram": 512,
-    "hdd": 4,
+    "hdd": 2,
     "os": "debian",
     "version": "13"
   }
@@ -32,7 +32,7 @@
   "resources": {
     "cpu": 1,
     "ram": 256,
-    "hdd": 2,
+    "hdd": 1,
     "os": "alpine",
     "version": "3.23"
   }
@@ -12,7 +12,7 @@
   "documentation": "https://radicale.org/master.html#documentation-1",
   "website": "https://radicale.org/",
   "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp",
-  "config_path": "/etc/radicale/config",
+  "config_path": "/etc/radicale/config or ~/.config/radicale/config",
   "description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)",
   "install_methods": [
     {
@@ -32,10 +32,6 @@
     "password": null
   },
   "notes": [
-    {
-      "text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.",
-      "type": "warning"
-    },
     {
       "text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.",
       "type": "info"
@@ -16,8 +16,7 @@ update_os
 msg_info "Installing Dependencies"
 $STD apt install -y \
   python3-pip \
-  python3-libtorrent \
-  python3-setuptools
+  python3-libtorrent
 msg_ok "Installed Dependencies"

 msg_info "Installing Deluge"
@@ -37,7 +37,7 @@ fetch_and_deploy_gh_release "dispatcharr" "Dispatcharr/Dispatcharr" "tarball"
 msg_info "Installing Python Dependencies with uv"
 cd /opt/dispatcharr
 $STD uv venv --clear
-$STD uv sync
+$STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
 $STD uv pip install gunicorn gevent celery redis daphne
 msg_ok "Installed Python Dependencies"

@@ -38,18 +38,6 @@ rm -f "$DEB_FILE"
 echo "$LATEST_VERSION" >~/.emqx
 msg_ok "Installed EMQX"

-read -r -p "${TAB3}Would you like to disable the EMQX MQ feature? (reduces disk/CPU usage) <y/N> " prompt
-if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
-  msg_info "Disabling EMQX MQ feature"
-  mkdir -p /etc/emqx
-  if ! grep -q "^mq.enable" /etc/emqx/emqx.conf 2>/dev/null; then
-    echo "mq.enable = false" >>/etc/emqx/emqx.conf
-  else
-    sed -i 's/^mq.enable.*/mq.enable = false/' /etc/emqx/emqx.conf
-  fi
-  msg_ok "Disabled EMQX MQ feature"
-fi
-
 msg_info "Starting EMQX service"
 $STD systemctl enable -q --now emqx
 msg_ok "Enabled EMQX service"
@@ -38,10 +38,6 @@ for server in "${servers[@]}"; do
   fi
 done

-msg_info "Installing dependencies"
-$STD apt install -y inotify-tools
-msg_ok "Installed dependencies"
-
 msg_info "Installing Collabora Online"
 curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg
 cat <<EOF >/etc/apt/sources.list.d/colloboraonline.sources
@@ -152,15 +148,8 @@ COLLABORATION_JWT_SECRET=
 # FRONTEND_FULL_TEXT_SEARCH_ENABLED=true
 # SEARCH_EXTRACTOR_TIKA_TIKA_URL=<your-tika-url>

-## Uncomment below to enable PosixFS Collaborative Mode
-## Increase inotify watch/instance limits on your PVE host:
-### sysctl -w fs.inotify.max_user_watches=1048576
-### sysctl -w fs.inotify.max_user_instances=1024
-# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true
-# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait
-# STORAGE_USERS_POSIX_WATCH_FS=true
-# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>
-## User files location - experimental - use at your own risk! - ZFS, NFS v4.2+ supported - CIFS/SMB not supported
+## External storage test - Only NFS v4.2+ is supported
+## User files
 # STORAGE_USERS_POSIX_ROOT=<path-to-your-bind_mount>
 EOF

@@ -24,7 +24,7 @@ setup_hwaccel
 PYTHON_VERSION="3.12" setup_uv
 
 msg_info "Installing Open WebUI"
-$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
+$STD uv tool install --python 3.12 open-webui[all]
 msg_ok "Installed Open WebUI"
 
 read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt
@@ -178,7 +178,7 @@ http:
 servers:
 - url: "http://$LOCAL_IP:3000"
 EOF
-$STD npm run db:push
+$STD npm run db:sqlite:push
 
 . /etc/os-release
 if [ "$VERSION_CODENAME" = "trixie" ]; then
@@ -14,51 +14,42 @@ network_check
 update_os
 
 msg_info "Installing Dependencies"
-$STD apt install -y apache2-utils
+$STD apt install -y \
+  apache2-utils \
+  python3-pip \
+  python3-venv
 msg_ok "Installed Dependencies"
 
-PYTHON_VERSION="3.13" setup_uv
-fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
-
 msg_info "Setting up Radicale"
-cd /opt/radicale
+python3 -m venv /opt/radicale
+source /opt/radicale/bin/activate
+$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
 RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
-$STD htpasswd -c -b -5 /opt/radicale/users admin "$RNDPASS"
+$STD htpasswd -c -b -5 /opt/radicale/users admin $RNDPASS
 {
 echo "Radicale Credentials"
 echo "Admin User: admin"
 echo "Admin Password: $RNDPASS"
 } >>~/radicale.creds
+msg_ok "Done setting up Radicale"
 
-mkdir -p /etc/radicale
-cat <<EOF >/etc/radicale/config
-[server]
-hosts = 0.0.0.0:5232
-
-[auth]
-type = htpasswd
-htpasswd_filename = /opt/radicale/users
-htpasswd_encryption = sha512
-
-[storage]
-type = multifilesystem
-filesystem_folder = /var/lib/radicale/collections
-
-[web]
-type = internal
+msg_info "Setup Service"
+cat <<EOF >/opt/radicale/start.sh
+#!/usr/bin/env bash
+source /opt/radicale/bin/activate
+python3 -m radicale --storage-filesystem-folder=/var/lib/radicale/collections --hosts 0.0.0.0:5232 --auth-type htpasswd --auth-htpasswd-filename /opt/radicale/users --auth-htpasswd-encryption sha512
 EOF
-msg_ok "Set up Radicale"
 
-msg_info "Creating Service"
+chmod +x /opt/radicale/start.sh
 
 cat <<EOF >/etc/systemd/system/radicale.service
-[Unit]
 Description=A simple CalDAV (calendar) and CardDAV (contact) server
 After=network.target
 Requires=network.target
 
 [Service]
-WorkingDirectory=/opt/radicale
-ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config
+ExecStart=/opt/radicale/start.sh
 Restart=on-failure
 # User=radicale
 # Deny other users access to the calendar data
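Both sides of this hunk run the same daemon in the end: the base branch drives Radicale through uv with a config file, while this branch bakes the flags into a venv wrapper script. A quick hedged post-install check that works for either variant (standard systemd/curl usage, not part of the diff; the creds-file layout is taken from the hunk above):

```bash
# Confirm the unit is running and the server answers on the port from the diff.
systemctl is-active radicale
PASS="$(awk -F': ' '/Admin Password/ {print $2}' ~/radicale.creds)"
curl -fsu "admin:${PASS}" http://127.0.0.1:5232/ >/dev/null && echo "Radicale answers"
```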
@@ -15,18 +15,16 @@ update_os
 
 msg_info "Installing Dependencies"
 $STD apt install -y apt-transport-https
-curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
-cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
-Types: deb
-URIs: https://www.ui.com/downloads/unifi/debian
-Suites: stable
-Components: ubiquiti
-Architectures: amd64
-Signed-By: /usr/share/keyrings/unifi-repo.gpg
-EOF
-$STD apt update
 msg_ok "Installed Dependencies"
 
+setup_deb822_repo \
+  "unifi" \
+  "https://dl.ui.com/unifi/unifi-repo.gpg" \
+  "https://www.ui.com/downloads/unifi/debian" \
+  "stable" \
+  "ubiquiti" \
+  "amd64"
+
 JAVA_VERSION="21" setup_java
 
 if lscpu | grep -q 'avx'; then
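The refactor replaces the inline heredoc with the repo's `setup_deb822_repo` helper, which takes the same pieces (name, GPG URL, repo URL, suite, component, architecture) as arguments. A hypothetical sketch of the deb822 stanza the helper is expected to produce for these arguments; the exact filename and keyring path are assumptions based on the helper's hunk further down, not confirmed output:

```bash
cat /etc/apt/sources.list.d/unifi.sources
# Types: deb
# URIs: https://www.ui.com/downloads/unifi/debian
# Suites: stable
# Components: ubiquiti
# Architectures: amd64
# Signed-By: /etc/apt/keyrings/unifi.gpg
```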
misc/api.func: 1094 lines changed
File diff suppressed because it is too large
@@ -3636,9 +3636,6 @@ $PCT_OPTIONS_STRING"
 exit 214
 fi
 msg_ok "Storage space validated"
-
-# Report installation start to API (early - captures failed installs too)
-post_to_api
 fi
 
 create_lxc_container || exit $?
@@ -4013,9 +4010,6 @@ EOF'
 # Install SSH keys
 install_ssh_keys_into_ct
 
-# Start timer for duration tracking
-start_install_timer
-
 # Run application installer
 # Disable error trap - container errors are handled internally via flag file
 set +Eeuo pipefail # Disable ALL error handling temporarily
@@ -4046,10 +4040,9 @@ EOF'
 if [[ $install_exit_code -ne 0 ]]; then
 msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"
 
-# Copy install log from container BEFORE API call so get_error_text() can read it
+# Copy both logs from container before potential deletion
 local build_log_copied=false
 local install_log_copied=false
-local host_install_log="/tmp/install-lxc-${CTID}-${SESSION_ID}.log"
 
 if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
 # Copy BUILD_LOG (creation log) if it exists
@@ -4057,22 +4050,15 @@ EOF'
 cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
 fi
 
-# Copy INSTALL_LOG from container to host
-if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$host_install_log" 2>/dev/null; then
+# Copy INSTALL_LOG from container
+if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
 install_log_copied=true
-# Point INSTALL_LOG to host copy so get_error_text() finds it
-INSTALL_LOG="$host_install_log"
 fi
-fi
 
-# Report failure to telemetry API (now with log available on host)
-post_update_to_api "failed" "$install_exit_code"
-
-# Show available logs
-if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
+# Show available logs
 echo ""
 [[ "$build_log_copied" == true ]] && echo -e "${GN}✔${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
-[[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}${host_install_log}${CL}"
+[[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
 fi
 
 # Dev mode: Keep container or open breakpoint shell
@@ -4130,10 +4116,6 @@ EOF'
 echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}"
 fi
 
-# Force one final status update attempt after cleanup
-# This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
-post_update_to_api "failed" "$install_exit_code" "force"
-
 exit $install_exit_code
 fi
 }
@@ -5141,9 +5123,9 @@ EOF
 # api_exit_script()
 #
 # - Exit trap handler for reporting to API telemetry
-# - Captures exit code and reports to PocketBase using centralized error descriptions
-# - Uses explain_exit_code() from api.func for consistent error messages
-# - Posts failure status with exit code to API (error description resolved automatically)
+# - Captures exit code and reports to API using centralized error descriptions
+# - Uses explain_exit_code() from error_handler.func for consistent error messages
+# - Posts failure status with exit code to API (error description added automatically)
 # - Only executes on non-zero exit codes
 # ------------------------------------------------------------------------------
 api_exit_script() {
@@ -5156,6 +5138,6 @@ api_exit_script() {
 if command -v pveversion >/dev/null 2>&1; then
 trap 'api_exit_script' EXIT
 fi
-trap 'post_update_to_api "failed" "$?"' ERR
-trap 'post_update_to_api "failed" "130"' SIGINT
-trap 'post_update_to_api "failed" "143"' SIGTERM
+trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
+trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
+trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
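The two sides of this hunk report different things from the ERR trap: `$?` is the numeric exit status of the failing command, while `$BASH_COMMAND` is the text of the command itself. A standalone illustration of the difference (generic bash, not repo code):

```bash
#!/usr/bin/env bash
set -E # propagate the ERR trap into functions and subshells

# Print both views of a failure: the numeric status and the command text.
trap 'echo "exit status: $?; failing command: $BASH_COMMAND"' ERR

false                      # -> exit status: 1; failing command: false
grep pattern /no/such/file # -> exit status: 2; failing command: grep pattern /no/such/file
```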
@@ -27,90 +27,100 @@
 # ------------------------------------------------------------------------------
 # explain_exit_code()
 #
-# - Canonical version is defined in api.func (sourced before this file)
-# - This section only provides a fallback if api.func was not loaded
-# - See api.func SECTION 1 for the authoritative exit code mappings
+# - Maps numeric exit codes to human-readable error descriptions
+# - Supports:
+#   * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
+#   * Package manager errors (APT, DPKG: 100, 101, 255)
+#   * Node.js/npm errors (243-249, 254)
+#   * Python/pip/uv errors (210-212)
+#   * PostgreSQL errors (231-234)
+#   * MySQL/MariaDB errors (241-244)
+#   * MongoDB errors (251-254)
+#   * Proxmox custom codes (200-231)
+# - Returns description string for given exit code
 # ------------------------------------------------------------------------------
-if ! declare -f explain_exit_code &>/dev/null; then
 explain_exit_code() {
 local code="$1"
 case "$code" in
+# --- Generic / Shell ---
 1) echo "General error / Operation not permitted" ;;
 2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
-6) echo "curl: DNS resolution failed (could not resolve host)" ;;
-7) echo "curl: Failed to connect (network unreachable / host down)" ;;
-22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
-28) echo "curl: Operation timeout (network slow or server not responding)" ;;
-35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
-100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
-101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
-102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
-124) echo "Command timed out (timeout command)" ;;
-126) echo "Command invoked cannot execute (permission problem?)" ;;
-127) echo "Command not found" ;;
-128) echo "Invalid argument to exit" ;;
-130) echo "Terminated by Ctrl+C (SIGINT)" ;;
-134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
-137) echo "Killed (SIGKILL / Out of memory?)" ;;
-139) echo "Segmentation fault (core dumped)" ;;
-141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;;
-143) echo "Terminated (SIGTERM)" ;;
-150) echo "Systemd: Service failed to start" ;;
-151) echo "Systemd: Service unit not found" ;;
-152) echo "Permission denied (EACCES)" ;;
-153) echo "Build/compile failed (make/gcc/cmake)" ;;
-154) echo "Node.js: Native addon build failed (node-gyp)" ;;
-160) echo "Python: Virtualenv / uv environment missing or broken" ;;
-161) echo "Python: Dependency resolution failed" ;;
-162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
-170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
-171) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
-172) echo "PostgreSQL: Database does not exist" ;;
-173) echo "PostgreSQL: Fatal error in query / syntax" ;;
-180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
-181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
-182) echo "MySQL/MariaDB: Database does not exist" ;;
-183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
-190) echo "MongoDB: Connection failed (server not running)" ;;
-191) echo "MongoDB: Authentication failed (bad user/password)" ;;
-192) echo "MongoDB: Database not found" ;;
-193) echo "MongoDB: Fatal query error" ;;
-200) echo "Proxmox: Failed to create lock file" ;;
-203) echo "Proxmox: Missing CTID variable" ;;
-204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
-205) echo "Proxmox: Invalid CTID (<100)" ;;
-206) echo "Proxmox: CTID already in use" ;;
-207) echo "Proxmox: Password contains unescaped special characters" ;;
-208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
-209) echo "Proxmox: Container creation failed" ;;
-210) echo "Proxmox: Cluster not quorate" ;;
-211) echo "Proxmox: Timeout waiting for template lock" ;;
-212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
-213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
-214) echo "Proxmox: Not enough storage space" ;;
-215) echo "Proxmox: Container created but not listed (ghost state)" ;;
-216) echo "Proxmox: RootFS entry missing in config" ;;
-217) echo "Proxmox: Storage not accessible" ;;
-218) echo "Proxmox: Template file corrupted or incomplete" ;;
-219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
-220) echo "Proxmox: Unable to resolve template path" ;;
-221) echo "Proxmox: Template file not readable" ;;
-222) echo "Proxmox: Template download failed" ;;
-223) echo "Proxmox: Template not available after download" ;;
-224) echo "Proxmox: PBS storage is for backups only" ;;
-225) echo "Proxmox: No template available for OS/Version" ;;
-231) echo "Proxmox: LXC stack upgrade failed" ;;
-243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
-245) echo "Node.js: Invalid command-line option" ;;
-246) echo "Node.js: Internal JavaScript Parse Error" ;;
-247) echo "Node.js: Fatal internal error" ;;
-248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
-249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
-255) echo "DPKG: Fatal internal error" ;;
-*) echo "Unknown error" ;;
-esac
-}
-fi
+126) echo "Command invoked cannot execute (permission problem?)" ;;
+127) echo "Command not found" ;;
+128) echo "Invalid argument to exit" ;;
+130) echo "Terminated by Ctrl+C (SIGINT)" ;;
+137) echo "Killed (SIGKILL / Out of memory?)" ;;
+139) echo "Segmentation fault (core dumped)" ;;
+143) echo "Terminated (SIGTERM)" ;;
+
+# --- Package manager / APT / DPKG ---
+100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
+101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
+255) echo "DPKG: Fatal internal error" ;;
+
+# --- Node.js / npm / pnpm / yarn ---
+243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
+245) echo "Node.js: Invalid command-line option" ;;
+246) echo "Node.js: Internal JavaScript Parse Error" ;;
+247) echo "Node.js: Fatal internal error" ;;
+248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
+249) echo "Node.js: Inspector error" ;;
+254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
+
+# --- Python / pip / uv ---
+210) echo "Python: Virtualenv / uv environment missing or broken" ;;
+211) echo "Python: Dependency resolution failed" ;;
+212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
+
+# --- PostgreSQL ---
+231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
+232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
+233) echo "PostgreSQL: Database does not exist" ;;
+234) echo "PostgreSQL: Fatal error in query / syntax" ;;
+
+# --- MySQL / MariaDB ---
+241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
+242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
+243) echo "MySQL/MariaDB: Database does not exist" ;;
+244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
+
+# --- MongoDB ---
+251) echo "MongoDB: Connection failed (server not running)" ;;
+252) echo "MongoDB: Authentication failed (bad user/password)" ;;
+253) echo "MongoDB: Database not found" ;;
+254) echo "MongoDB: Fatal query error" ;;
+
+# --- Proxmox Custom Codes ---
+200) echo "Proxmox: Failed to create lock file" ;;
+203) echo "Proxmox: Missing CTID variable" ;;
+204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
+205) echo "Proxmox: Invalid CTID (<100)" ;;
+206) echo "Proxmox: CTID already in use" ;;
+207) echo "Proxmox: Password contains unescaped special characters" ;;
+208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
+209) echo "Proxmox: Container creation failed" ;;
+210) echo "Proxmox: Cluster not quorate" ;;
+211) echo "Proxmox: Timeout waiting for template lock" ;;
+212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
+213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
+214) echo "Proxmox: Not enough storage space" ;;
+215) echo "Proxmox: Container created but not listed (ghost state)" ;;
+216) echo "Proxmox: RootFS entry missing in config" ;;
+217) echo "Proxmox: Storage not accessible" ;;
+219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
+224) echo "Proxmox: PBS storage is for backups only" ;;
+218) echo "Proxmox: Template file corrupted or incomplete" ;;
+220) echo "Proxmox: Unable to resolve template path" ;;
+221) echo "Proxmox: Template file not readable" ;;
+222) echo "Proxmox: Template download failed" ;;
+223) echo "Proxmox: Template not available after download" ;;
+225) echo "Proxmox: No template available for OS/Version" ;;
+231) echo "Proxmox: LXC stack upgrade failed" ;;
+
+# --- Default ---
+*) echo "Unknown error" ;;
+esac
+}
 
 # ==============================================================================
 # SECTION 2: ERROR HANDLERS
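Either version of the map is consumed the same way by the reporting path: the handler resolves a captured numeric status into a description before posting it. A hedged usage sketch, assuming only that explain_exit_code has been sourced:

```bash
# Resolve a captured exit status into its human-readable description.
exit_code=214
description="$(explain_exit_code "$exit_code")"
echo "Install failed with ${exit_code}: ${description}"
# -> Install failed with 214: Proxmox: Not enough storage space
```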
@@ -187,7 +197,12 @@ error_handler() {
 
 # Create error flag file with exit code for host detection
 echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
-# Log path is shown by host as combined log - no need to show container path
+
+if declare -f msg_custom >/dev/null 2>&1; then
+msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
+else
+echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
+fi
 else
 # HOST CONTEXT: Show local log path and offer container cleanup
 if declare -f msg_custom >/dev/null 2>&1; then
@@ -198,11 +213,6 @@ error_handler() {
 
 # Offer to remove container if it exists (build errors after container creation)
 if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then
-# Report failure to API before container cleanup
-if declare -f post_update_to_api &>/dev/null; then
-post_update_to_api "failed" "$exit_code"
-fi
-
 echo ""
 echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
 
@@ -222,12 +232,6 @@ error_handler() {
 pct destroy "$CTID" &>/dev/null || true
 echo -e "${GN}✔${CL} Container ${CTID} removed"
 fi
 
-# Force one final status update attempt after cleanup
-# This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
-if declare -f post_update_to_api &>/dev/null; then
-post_update_to_api "failed" "$exit_code" "force"
-fi
-
 fi
 fi
 fi
@@ -249,18 +253,6 @@ error_handler() {
 # ------------------------------------------------------------------------------
 on_exit() {
 local exit_code=$?
-# Report orphaned "installing" records to telemetry API
-# Catches ALL exit paths: errors (non-zero), signals, AND clean exits where
-# post_to_api was called ("installing" sent) but post_update_to_api was never called
-if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
-if declare -f post_update_to_api >/dev/null 2>&1; then
-if [[ $exit_code -ne 0 ]]; then
-post_update_to_api "failed" "$exit_code"
-else
-post_update_to_api "failed" "1"
-fi
-fi
-fi
 [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
 exit "$exit_code"
 }
@@ -273,10 +265,6 @@ on_exit() {
 # - Exits with code 130 (128 + SIGINT=2)
 # ------------------------------------------------------------------------------
 on_interrupt() {
-# Report interruption to telemetry API (prevents stuck "installing" records)
-if declare -f post_update_to_api >/dev/null 2>&1; then
-post_update_to_api "failed" "130"
-fi
 if declare -f msg_error >/dev/null 2>&1; then
 msg_error "Interrupted by user (SIGINT)"
 else
@@ -294,10 +282,6 @@ on_interrupt() {
 # - Triggered by external process termination
 # ------------------------------------------------------------------------------
 on_terminate() {
-# Report termination to telemetry API (prevents stuck "installing" records)
-if declare -f post_update_to_api >/dev/null 2>&1; then
-post_update_to_api "failed" "143"
-fi
 if declare -f msg_error >/dev/null 2>&1; then
 msg_error "Terminated by signal (SIGTERM)"
 else
@@ -465,7 +465,6 @@ manage_tool_repository() {
 msg_error "Failed to download MongoDB GPG key"
 return 1
 fi
-chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"
 
 # Setup repository
 local distro_codename
@@ -1295,33 +1294,12 @@ setup_deb822_repo() {
 return 1
 }
 
-# Import GPG key (auto-detect binary vs ASCII-armored format)
-local tmp_gpg
-tmp_gpg=$(mktemp) || return 1
-curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
-msg_error "Failed to download GPG key for ${name}"
-rm -f "$tmp_gpg"
-return 1
-}
+# Import GPG
+curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
+msg_error "Failed to import GPG key for ${name}"
+return 1
+}
 
-if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
-# ASCII-armored — dearmor to binary
-gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" < "$tmp_gpg" || {
-msg_error "Failed to dearmor GPG key for ${name}"
-rm -f "$tmp_gpg"
-return 1
-}
-else
-# Already in binary GPG format — copy directly
-cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
-msg_error "Failed to install GPG key for ${name}"
-rm -f "$tmp_gpg"
-return 1
-}
-fi
-rm -f "$tmp_gpg"
-chmod 644 "/etc/apt/keyrings/${name}.gpg"
 
 # Write deb822
 {
 echo "Types: deb"
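The removed branch distinguishes ASCII-armored keys (which start with a `BEGIN PGP` header and need `gpg --dearmor`) from keys already in binary OpenPGP format (which can be installed as-is); the replacement always pipes through `gpg --dearmor`, which assumes upstream keys arrive in a form it handles. A minimal standalone sketch of the detection logic from the removed side (URL and key name are placeholders):

```bash
#!/usr/bin/env bash
gpg_url="https://example.com/repo-key.asc" # placeholder URL
name="example"                             # placeholder repo name

tmp_gpg="$(mktemp)" || exit 1
curl -fsSL "$gpg_url" -o "$tmp_gpg"

if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
  # ASCII-armored: convert to the binary format APT expects.
  gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" <"$tmp_gpg"
else
  # Already binary: install it unchanged.
  cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg"
fi
rm -f "$tmp_gpg"
chmod 644 "/etc/apt/keyrings/${name}.gpg"
```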
@@ -75,37 +75,14 @@ pct exec "$CTID" -- bash -c '
 set -e
 export DEBIAN_FRONTEND=noninteractive
 
-# Source os-release properly (handles quoted values)
-source /etc/os-release
+ID=$(grep "^ID=" /etc/os-release | cut -d"=" -f2)
+VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2)
 
-# Fallback if DNS is poisoned or blocked
+# fallback if DNS is poisoned or blocked
 ORIG_RESOLV="/etc/resolv.conf"
 BACKUP_RESOLV="/tmp/resolv.conf.backup"
 
-# Check DNS resolution using multiple methods (dig may not be installed)
-dns_check_failed=true
-if command -v dig &>/dev/null; then
-if dig +short pkgs.tailscale.com 2>/dev/null | grep -qvE "^127\.|^0\.0\.0\.0$|^$"; then
-dns_check_failed=false
-fi
-elif command -v host &>/dev/null; then
-if host pkgs.tailscale.com 2>/dev/null | grep -q "has address"; then
-dns_check_failed=false
-fi
-elif command -v nslookup &>/dev/null; then
-if nslookup pkgs.tailscale.com 2>/dev/null | grep -q "Address:"; then
-dns_check_failed=false
-fi
-elif command -v getent &>/dev/null; then
-if getent hosts pkgs.tailscale.com &>/dev/null; then
-dns_check_failed=false
-fi
-else
-# No DNS tools available, try curl directly and assume DNS works
-dns_check_failed=false
-fi
-
-if $dns_check_failed; then
+if ! dig +short pkgs.tailscale.com | grep -qvE "^127\.|^0\.0\.0\.0$"; then
 echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)."
 echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)"
 cp "$ORIG_RESOLV" "$BACKUP_RESOLV"
@@ -115,22 +92,17 @@ fi
 if ! command -v curl &>/dev/null; then
 echo "[INFO] curl not found, installing..."
 apt-get update -qq
-apt update -qq
-apt install -y curl >/dev/null
+apt-get install -y curl >/dev/null
 fi
 
-# Ensure keyrings directory exists
-mkdir -p /usr/share/keyrings
-
-curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VERSION_CODENAME}.noarmor.gpg" \
+curl -fsSL https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg \
 | tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
 
-echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VERSION_CODENAME} main" \
+echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
 >/etc/apt/sources.list.d/tailscale.list
 
 apt-get update -qq
-apt update -qq
-apt install -y tailscale >/dev/null
+apt-get install -y tailscale >/dev/null
 
 if [[ -f /tmp/resolv.conf.backup ]]; then
 echo "[INFO] Restoring original /etc/resolv.conf"
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
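This same one-line change repeats across the VM scripts below: the second argument to post_update_to_api switches from the numeric `${exit_code}` back to the `${command}` text, so consumers that expect a numeric status (for example an explain_exit_code lookup) receive a command string instead. A hedged illustration of the two payloads; the function body here is a stand-in for demonstration, not the repo implementation:

```bash
# Stand-in reporter, just to show what each side of the diff would send.
post_update_to_api() { echo "status=$1 error=$2"; }

exit_code=127
command='qm importdisk 100 image.img local-lvm'

post_update_to_api "failed" "${exit_code}" # -> status=failed error=127
post_update_to_api "failed" "${command}"   # -> status=failed error=qm importdisk 100 image.img local-lvm
```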
@@ -203,6 +203,7 @@ function exit-script() {
 
 function default_settings() {
 VMID=$(get_valid_nextid)
+FORMAT=",efitype=4m"
 MACHINE=""
 DISK_SIZE="4G"
 DISK_CACHE=""
@@ -258,9 +259,11 @@ function advanced_settings() {
 3>&1 1>&2 2>&3); then
 if [ "$MACH" = q35 ]; then
 echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
+FORMAT=""
 MACHINE=" -machine q35"
 else
 echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
+FORMAT=",efitype=4m"
 MACHINE=""
 fi
 else
@@ -473,45 +476,31 @@ case $STORAGE_TYPE in
 nfs | dir | cifs)
 DISK_EXT=".qcow2"
 DISK_REF="$VMID/"
-DISK_IMPORT="--format qcow2"
+DISK_IMPORT="-format qcow2"
 THIN=""
 ;;
 btrfs)
 DISK_EXT=".raw"
 DISK_REF="$VMID/"
-DISK_IMPORT="--format raw"
+DISK_IMPORT="-format raw"
+FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="--format raw"
-;;
 esac
+for i in {0,1}; do
+disk="DISK$i"
+eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
+eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
+done
 
 msg_info "Creating a Arch Linux VM"
 qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
 -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
+pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
-if qm disk import --help >/dev/null 2>&1; then
-IMPORT_CMD=(qm disk import)
-else
-IMPORT_CMD=(qm importdisk)
-fi
-
-IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
-DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
-[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
-[[ -z "$DISK_REF_IMPORTED" ]] && {
-msg_error "Unable to determine imported disk reference."
-echo "$IMPORT_OUT"
-exit 1
-}
-msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
+qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
 
 qm set $VMID \
--efidisk0 ${STORAGE}:0,efitype=4m \
--scsi0 ${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,} \
+-efidisk0 ${DISK0_REF}${FORMAT} \
+-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
 -ide2 ${STORAGE}:cloudinit \
 -boot order=scsi0 \
 -serial0 socket >/dev/null
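The base side of this hunk probes for the modern `qm disk import` subcommand and falls back to the legacy `qm importdisk` alias, instead of pinning one form. A standalone sketch of that probe, using the same commands as the hunk (VMID, FILE, and STORAGE are assumed to be set by the surrounding script):

```bash
# Prefer the modern subcommand when the installed Proxmox version supports it.
if qm disk import --help >/dev/null 2>&1; then
  IMPORT_CMD=(qm disk import)
else
  IMPORT_CMD=(qm importdisk)
fi
"${IMPORT_CMD[@]}" "$VMID" "$FILE" "$STORAGE" --format qcow2
```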
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -560,11 +560,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -501,11 +501,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -45,7 +45,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -74,7 +74,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -71,7 +71,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -566,11 +566,6 @@ zfspool)
 DISK_REF=""
 DISK_IMPORT="-format raw"
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 
 DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}"
@@ -70,7 +70,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -487,11 +487,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1,2}; do
 disk="DISK$i"
@@ -74,7 +74,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$exit_code"
+post_update_to_api "failed" "$command"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -48,7 +48,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$exit_code"
+post_update_to_api "failed" "$command"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -619,11 +619,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -71,7 +71,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -500,11 +500,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1,2}; do
 disk="DISK$i"
@@ -79,7 +79,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }
@@ -402,11 +402,6 @@ nfs | dir)
 DISK_REF="$VMID/"
 DISK_IMPORT="-format qcow2"
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -66,7 +66,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$exit_code"
+post_update_to_api "failed" "$command"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -482,11 +482,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -69,7 +69,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$exit_code"
+post_update_to_api "failed" "$command"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -484,11 +484,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -68,7 +68,7 @@ function error_handler() {
 local exit_code="$?"
 local line_number="$1"
 local command="$2"
-post_update_to_api "failed" "$exit_code"
+post_update_to_api "failed" "$command"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
 echo -e "\n$error_message\n"
 cleanup_vmid
@@ -483,11 +483,6 @@ btrfs)
 FORMAT=",efitype=4m"
 THIN=""
 ;;
-*)
-DISK_EXT=""
-DISK_REF=""
-DISK_IMPORT="-format raw"
-;;
 esac
 for i in {0,1}; do
 disk="DISK$i"
@@ -69,7 +69,7 @@ function error_handler() {
 local line_number="$1"
 local command="$2"
 local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-post_update_to_api "failed" "${exit_code}"
+post_update_to_api "failed" "${command}"
 echo -e "\n$error_message\n"
 cleanup_vmid
 }