Mirror of https://github.com/community-scripts/ProxmoxVE.git
Synced 2026-02-13 16:53:27 +01:00

Compare commits: fix/github...refactor/u (105 commits)
Commit SHAs:

1dc16954ef, 0db0e34ce7, 0957a23366, 9f3588dd8d, f23414a1a8, 2a8bb76dcf, bf85ef2a8b, cc89cdbab1,
d6f3f03f8a, 55e35d7f11, 3b9f8d4a93, 6c5377adec, eeb349346b, d271c16799, 4774c54861, 4bf63bae35,
f2b7c9638d, 551f89e46f, 4f571a1eb6, 3156e8e363, 60ebdc97a5, 20ec369338, 4907a906c3, 27e3a4301e,
43fb75f2b4, 899d0e4baa, 85584b105d, 3fe6f50414, 724a066aed, cd6e8ecbbe, 8083c0c0e1, 29836f35ed,
17d3d4297c, 2b921736e6, ddabe81dd8, 19c5671d3f, 2326520d17, 7964d39e32, f7cf7c8adc, 744191cb84,
291ed4c5ad, f9612c5aba, 403a839ac0, 41c89413ef, fa11528a7b, 2a03c86384, 57b4e10b93, 4b22c7cc2d,
79fd0d1dda, 280778d53b, 1c3a3107f1, ee2c3a20ee, 5ee4f4e34b, 4b0e893bf1, 3676157a7c, e437e50882,
6e9a94b46d, 137ae6775e, dd8c998d43, b215bac01d, c3cd9df12f, 406d53ea2f, c8b278f26f, a6f0d7233e,
079a436286, 1c2ed6ff10, c1d7f23a17, fdbe48badb, d09dd0b664, cba6717469, 0a12acf6bd, 4e4defa236,
c15f69712f, b53a731c42, ddfe9166a1, 1b1c84ad4f, db69c7b0f8, 53b3b4bf9f, 8fadcc0130, 5aff8dc2f1,
e7ed841361, 7e49c222e5, 9f31012598, 811062f958, 893b0bfb4a, f34f994560, 216b389635, d062baf8c9,
e09e244c3d, 2645f4cf4d, a0b55b6934, b263dc25fe, ac308c931e, a16dfb6d82, 63e9bc3729, 3735f9251b,
fc2559c702, 5f2d463408, 69e0dc6968, fccb8a923a, 53dbb9d705, 236c5296b8, 76c7e3a67f, 4dbb139c60,
c581704fdd
.github/workflows/update-versions-github.yml (12 changed lines; generated, vendored)

@@ -89,9 +89,15 @@ jobs:
           slug=$(jq -r '.slug // empty' "$json_file" 2>/dev/null)
           [[ -z "$slug" ]] && continue

-          # Find corresponding install script
-          install_script="install/${slug}-install.sh"
-          [[ ! -f "$install_script" ]] && continue
+          # Find corresponding script (install script or addon script)
+          install_script=""
+          if [[ -f "install/${slug}-install.sh" ]]; then
+            install_script="install/${slug}-install.sh"
+          elif [[ -f "tools/addon/${slug}.sh" ]]; then
+            install_script="tools/addon/${slug}.sh"
+          else
+            continue
+          fi

           # Look for fetch_and_deploy_gh_release calls
           # Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"]
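For reference, a call matching the quoted pattern appears in the donetick update script later in this compare:

  CLEAN_INSTALL=1 fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz"

Here "donetick" is the app, "donetick/donetick" the owner/repo, "prebuild" the mode, and "latest" the version; the trailing target directory and asset name are optional arguments beyond the pattern the workflow greps for.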
CHANGELOG.md (128 changed lines)

@@ -401,11 +401,137 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit

 </details>

+## 2026-02-13
+
+### 🚀 Updated Scripts
+
+- #### 🐞 Bug Fixes
+
+  - OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874))
+  - Planka: add migrate step to update function [@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877))
+  - Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868))
+  - [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864))
+
+- #### 🔧 Refactor
+
+  - chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872))
+  - Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850))
+
+### 💾 Core
+
+- #### 🔧 Refactor
+
+  - core: retry reporting with fallback payloads [@MickLesk](https://github.com/MickLesk) ([#11885](https://github.com/community-scripts/ProxmoxVE/pull/11885))
+
+### 📡 API
+
+- #### ✨ New Features
+
+  - error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875))
+
+### 🌐 Website
+
+- #### 📝 Script Information
+
+  - SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829))
+
+## 2026-02-12
+
+### 🚀 Updated Scripts
+
+- #### 🐞 Bug Fixes
+
+  - EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844))
+  - Increased the Grafana container default disk size. [@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840))
+  - Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825))
+  - Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833))
+  - Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831))
+
+- #### ✨ New Features
+
+  - Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842))
+  - Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810))
+
+### 💾 Core
+
+- #### ✨ New Features
+
+  - tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841))
+  - core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822))
+
+- #### 🔧 Refactor
+
+  - error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845))
+
+### 🧰 Tools
+
+- #### 🐞 Bug Fixes
+
+  - Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837))
+
+## 2026-02-11
+
+### 🆕 New Scripts
+
+- Draw.io ([#11788](https://github.com/community-scripts/ProxmoxVE/pull/11788))
+
+### 🚀 Updated Scripts
+
+- #### 🐞 Bug Fixes
+
+  - dispatcharr: include port 9191 in success-message [@MickLesk](https://github.com/MickLesk) ([#11808](https://github.com/community-scripts/ProxmoxVE/pull/11808))
+  - fix: make donetick 0.1.71 compatible [@tomfrenzel](https://github.com/tomfrenzel) ([#11804](https://github.com/community-scripts/ProxmoxVE/pull/11804))
+  - Kasm: Support new version URL format without hash suffix [@MickLesk](https://github.com/MickLesk) ([#11787](https://github.com/community-scripts/ProxmoxVE/pull/11787))
+  - LibreTranslate: Remove Torch [@tremor021](https://github.com/tremor021) ([#11783](https://github.com/community-scripts/ProxmoxVE/pull/11783))
+  - Snowshare: fix update script [@TuroYT](https://github.com/TuroYT) ([#11726](https://github.com/community-scripts/ProxmoxVE/pull/11726))
+
+- #### ✨ New Features
+
+  - [Feature] OpenCloud: support PosixFS Collaborative Mode [@vhsdream](https://github.com/vhsdream) ([#11806](https://github.com/community-scripts/ProxmoxVE/pull/11806))
+
+### 💾 Core
+
+- #### 🔧 Refactor
+
+  - core: respect EDITOR variable for config editing [@ls-root](https://github.com/ls-root) ([#11693](https://github.com/community-scripts/ProxmoxVE/pull/11693))
+
+### 📚 Documentation
+
+- Fix formatting in kutt.json notes section [@tiagodenoronha](https://github.com/tiagodenoronha) ([#11774](https://github.com/community-scripts/ProxmoxVE/pull/11774))
+
 ## 2026-02-10

+### 🚀 Updated Scripts
+
+- #### 🐞 Bug Fixes
+
+  - Immich: Pin version to 2.5.6 [@vhsdream](https://github.com/vhsdream) ([#11775](https://github.com/community-scripts/ProxmoxVE/pull/11775))
+  - Libretranslate: Fix setuptools [@tremor021](https://github.com/tremor021) ([#11772](https://github.com/community-scripts/ProxmoxVE/pull/11772))
+  - Element Synapse: prevent systemd invoke failure during apt install [@MickLesk](https://github.com/MickLesk) ([#11758](https://github.com/community-scripts/ProxmoxVE/pull/11758))
+
+- #### ✨ New Features
+
+  - Refactor: Slskd & Soularr [@vhsdream](https://github.com/vhsdream) ([#11674](https://github.com/community-scripts/ProxmoxVE/pull/11674))
+
 ### 🗑️ Deleted Scripts

-- paperless-exporter ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737))
+- move paperless-exporter from LXC to addon ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737))
+
+### 🧰 Tools
+
+- #### 🐞 Bug Fixes
+
+  - feat: improve storage parsing & add guestname [@carlosmaroot](https://github.com/carlosmaroot) ([#11752](https://github.com/community-scripts/ProxmoxVE/pull/11752))
+
+### 📂 Github
+
+- Github-Version Workflow: include addon scripts in extraction [@MickLesk](https://github.com/MickLesk) ([#11757](https://github.com/community-scripts/ProxmoxVE/pull/11757))
+
+### 🌐 Website
+
+- #### 📝 Script Information
+
+  - Snowshare: fix typo in config file path on website [@BirdMakingStuff](https://github.com/BirdMakingStuff) ([#11754](https://github.com/community-scripts/ProxmoxVE/pull/11754))

 ## 2026-02-09
@@ -1,5 +0,0 @@
-MONGO_USER=
-MONGO_PASSWORD=
-MONGO_IP=
-MONGO_PORT=
-MONGO_DATABASE=
api/go.mod (deleted, 23 lines)

@@ -1,23 +0,0 @@
-module proxmox-api
-
-go 1.24.0
-
-require (
-	github.com/gorilla/mux v1.8.1
-	github.com/joho/godotenv v1.5.1
-	github.com/rs/cors v1.11.1
-	go.mongodb.org/mongo-driver v1.17.2
-)
-
-require (
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
-	github.com/montanaflynn/stats v0.7.1 // indirect
-	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
-	github.com/xdg-go/scram v1.1.2 // indirect
-	github.com/xdg-go/stringprep v1.0.4 // indirect
-	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
-	golang.org/x/crypto v0.45.0 // indirect
-	golang.org/x/sync v0.18.0 // indirect
-	golang.org/x/text v0.31.0 // indirect
-)
api/go.sum (deleted, 56 lines)

@@ -1,56 +0,0 @@
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
-github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
-github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
-github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
-github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
-github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
-github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
-github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
-github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
-github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
-github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
-go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
-golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
api/main.go (deleted, 450 lines)

@@ -1,450 +0,0 @@
-// Copyright (c) 2021-2026 community-scripts ORG
-// Author: Michel Roegl-Brunner (michelroegl-brunner)
-// License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-
-package main
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"log"
-	"net/http"
-	"os"
-	"strconv"
-	"time"
-
-	"github.com/gorilla/mux"
-	"github.com/joho/godotenv"
-	"github.com/rs/cors"
-	"go.mongodb.org/mongo-driver/bson"
-	"go.mongodb.org/mongo-driver/bson/primitive"
-	"go.mongodb.org/mongo-driver/mongo"
-	"go.mongodb.org/mongo-driver/mongo/options"
-)
-
-var client *mongo.Client
-var collection *mongo.Collection
-
-func loadEnv() {
-	if err := godotenv.Load(); err != nil {
-		log.Fatal("Error loading .env file")
-	}
-}
-
-// DataModel represents a single document in MongoDB
-type DataModel struct {
-	ID         primitive.ObjectID `json:"id" bson:"_id,omitempty"`
-	CT_TYPE    uint               `json:"ct_type" bson:"ct_type"`
-	DISK_SIZE  float32            `json:"disk_size" bson:"disk_size"`
-	CORE_COUNT uint               `json:"core_count" bson:"core_count"`
-	RAM_SIZE   uint               `json:"ram_size" bson:"ram_size"`
-	OS_TYPE    string             `json:"os_type" bson:"os_type"`
-	OS_VERSION string             `json:"os_version" bson:"os_version"`
-	DISABLEIP6 string             `json:"disableip6" bson:"disableip6"`
-	NSAPP      string             `json:"nsapp" bson:"nsapp"`
-	METHOD     string             `json:"method" bson:"method"`
-	CreatedAt  time.Time          `json:"created_at" bson:"created_at"`
-	PVEVERSION string             `json:"pve_version" bson:"pve_version"`
-	STATUS     string             `json:"status" bson:"status"`
-	RANDOM_ID  string             `json:"random_id" bson:"random_id"`
-	TYPE       string             `json:"type" bson:"type"`
-	ERROR      string             `json:"error" bson:"error"`
-}
-
-type StatusModel struct {
-	RANDOM_ID string `json:"random_id" bson:"random_id"`
-	ERROR     string `json:"error" bson:"error"`
-	STATUS    string `json:"status" bson:"status"`
-}
-
-type CountResponse struct {
-	TotalEntries int64            `json:"total_entries"`
-	StatusCount  map[string]int64 `json:"status_count"`
-	NSAPPCount   map[string]int64 `json:"nsapp_count"`
-}
-
-// ConnectDatabase initializes the MongoDB connection
-func ConnectDatabase() {
-	loadEnv()
-
-	mongoURI := fmt.Sprintf("mongodb://%s:%s@%s:%s",
-		os.Getenv("MONGO_USER"),
-		os.Getenv("MONGO_PASSWORD"),
-		os.Getenv("MONGO_IP"),
-		os.Getenv("MONGO_PORT"))
-
-	database := os.Getenv("MONGO_DATABASE")
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	var err error
-	client, err = mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
-	if err != nil {
-		log.Fatal("Failed to connect to MongoDB!", err)
-	}
-	collection = client.Database(database).Collection("data_models")
-	fmt.Println("Connected to MongoDB on 10.10.10.18")
-}
-
-// UploadJSON handles API requests and stores data as a document in MongoDB
-func UploadJSON(w http.ResponseWriter, r *http.Request) {
-	var input DataModel
-
-	if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-	input.CreatedAt = time.Now()
-
-	_, err := collection.InsertOne(context.Background(), input)
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	log.Println("Received data:", input)
-	w.WriteHeader(http.StatusCreated)
-	json.NewEncoder(w).Encode(map[string]string{"message": "Data saved successfully"})
-}
-
-// UpdateStatus updates the status of a record based on RANDOM_ID
-func UpdateStatus(w http.ResponseWriter, r *http.Request) {
-	var input StatusModel
-
-	if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-
-	filter := bson.M{"random_id": input.RANDOM_ID}
-	update := bson.M{"$set": bson.M{"status": input.STATUS, "error": input.ERROR}}
-
-	_, err := collection.UpdateOne(context.Background(), filter, update)
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	log.Println("Updated data:", input)
-	w.WriteHeader(http.StatusOK)
-	json.NewEncoder(w).Encode(map[string]string{"message": "Record updated successfully"})
-}
-
-// GetDataJSON fetches all data from MongoDB
-func GetDataJSON(w http.ResponseWriter, r *http.Request) {
-	var records []DataModel
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	cursor, err := collection.Find(ctx, bson.M{})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-		records = append(records, record)
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(records)
-}
-
-func GetPaginatedData(w http.ResponseWriter, r *http.Request) {
-	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
-	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
-	if page < 1 {
-		page = 1
-	}
-	if limit < 1 {
-		limit = 10
-	}
-	skip := (page - 1) * limit
-	var records []DataModel
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	options := options.Find().SetSkip(int64(skip)).SetLimit(int64(limit))
-	cursor, err := collection.Find(ctx, bson.M{}, options)
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-		records = append(records, record)
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(records)
-}
-
-func GetSummary(w http.ResponseWriter, r *http.Request) {
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	totalCount, err := collection.CountDocuments(ctx, bson.M{})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	statusCount := make(map[string]int64)
-	nsappCount := make(map[string]int64)
-
-	pipeline := []bson.M{
-		{"$group": bson.M{"_id": "$status", "count": bson.M{"$sum": 1}}},
-	}
-	cursor, err := collection.Aggregate(ctx, pipeline)
-	if err == nil {
-		for cursor.Next(ctx) {
-			var result struct {
-				ID    string `bson:"_id"`
-				Count int64  `bson:"count"`
-			}
-			if err := cursor.Decode(&result); err == nil {
-				statusCount[result.ID] = result.Count
-			}
-		}
-	}
-
-	pipeline = []bson.M{
-		{"$group": bson.M{"_id": "$nsapp", "count": bson.M{"$sum": 1}}},
-	}
-	cursor, err = collection.Aggregate(ctx, pipeline)
-	if err == nil {
-		for cursor.Next(ctx) {
-			var result struct {
-				ID    string `bson:"_id"`
-				Count int64  `bson:"count"`
-			}
-			if err := cursor.Decode(&result); err == nil {
-				nsappCount[result.ID] = result.Count
-			}
-		}
-	}
-
-	response := CountResponse{
-		TotalEntries: totalCount,
-		StatusCount:  statusCount,
-		NSAPPCount:   nsappCount,
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(response)
-}
-
-func GetByNsapp(w http.ResponseWriter, r *http.Request) {
-	nsapp := r.URL.Query().Get("nsapp")
-	var records []DataModel
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	cursor, err := collection.Find(ctx, bson.M{"nsapp": nsapp})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-		records = append(records, record)
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(records)
-}
-
-func GetByDateRange(w http.ResponseWriter, r *http.Request) {
-
-	startDate := r.URL.Query().Get("start_date")
-	endDate := r.URL.Query().Get("end_date")
-
-	if startDate == "" || endDate == "" {
-		http.Error(w, "Both start_date and end_date are required", http.StatusBadRequest)
-		return
-	}
-
-	start, err := time.Parse("2006-01-02T15:04:05.999999+00:00", startDate+"T00:00:00+00:00")
-	if err != nil {
-		http.Error(w, "Invalid start_date format", http.StatusBadRequest)
-		return
-	}
-
-	end, err := time.Parse("2006-01-02T15:04:05.999999+00:00", endDate+"T23:59:59+00:00")
-	if err != nil {
-		http.Error(w, "Invalid end_date format", http.StatusBadRequest)
-		return
-	}
-
-	var records []DataModel
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	cursor, err := collection.Find(ctx, bson.M{
-		"created_at": bson.M{
-			"$gte": start,
-			"$lte": end,
-		},
-	})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-		records = append(records, record)
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(records)
-}
-
-func GetByStatus(w http.ResponseWriter, r *http.Request) {
-	status := r.URL.Query().Get("status")
-	var records []DataModel
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	cursor, err := collection.Find(ctx, bson.M{"status": status})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-		records = append(records, record)
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(records)
-}
-
-func GetByOS(w http.ResponseWriter, r *http.Request) {
-	osType := r.URL.Query().Get("os_type")
-	osVersion := r.URL.Query().Get("os_version")
-	var records []DataModel
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	cursor, err := collection.Find(ctx, bson.M{"os_type": osType, "os_version": osVersion})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-		records = append(records, record)
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(records)
-}
-
-func GetErrors(w http.ResponseWriter, r *http.Request) {
-	errorCount := make(map[string]int)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	cursor, err := collection.Find(ctx, bson.M{"error": bson.M{"$ne": ""}})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	defer cursor.Close(ctx)
-
-	for cursor.Next(ctx) {
-		var record DataModel
-		if err := cursor.Decode(&record); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-
-		if record.ERROR != "" {
-			errorCount[record.ERROR]++
-		}
-	}
-
-	type ErrorCountResponse struct {
-		Error string `json:"error"`
-		Count int    `json:"count"`
-	}
-
-	var errorCounts []ErrorCountResponse
-	for err, count := range errorCount {
-		errorCounts = append(errorCounts, ErrorCountResponse{
-			Error: err,
-			Count: count,
-		})
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(struct {
-		ErrorCounts []ErrorCountResponse `json:"error_counts"`
-	}{
-		ErrorCounts: errorCounts,
-	})
-}
-
-func main() {
-	ConnectDatabase()
-
-	router := mux.NewRouter()
-	router.HandleFunc("/upload", UploadJSON).Methods("POST")
-	router.HandleFunc("/upload/updatestatus", UpdateStatus).Methods("POST")
-	router.HandleFunc("/data/json", GetDataJSON).Methods("GET")
-	router.HandleFunc("/data/paginated", GetPaginatedData).Methods("GET")
-	router.HandleFunc("/data/summary", GetSummary).Methods("GET")
-	router.HandleFunc("/data/nsapp", GetByNsapp).Methods("GET")
-	router.HandleFunc("/data/date", GetByDateRange).Methods("GET")
-	router.HandleFunc("/data/status", GetByStatus).Methods("GET")
-	router.HandleFunc("/data/os", GetByOS).Methods("GET")
-	router.HandleFunc("/data/errors", GetErrors).Methods("GET")
-
-	c := cors.New(cors.Options{
-		AllowedOrigins:   []string{"*"},
-		AllowedMethods:   []string{"GET", "POST"},
-		AllowedHeaders:   []string{"Content-Type", "Authorization"},
-		AllowCredentials: true,
-	})
-
-	handler := c.Handler(router)
-
-	fmt.Println("Server running on port 8080")
-	log.Fatal(http.ListenAndServe(":8080", handler))
-}
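For context, the removed backend listened on port 8080 and stored documents shaped like DataModel via POST /upload. A hypothetical request against it (field values made up, host placeholder not from the source) would have looked like:

  curl -X POST http://<api-host>:8080/upload \
    -H 'Content-Type: application/json' \
    -d '{"ct_type":1,"disk_size":4,"core_count":2,"ram_size":2048,"os_type":"debian","os_version":"13","nsapp":"grafana","method":"default","status":"installing","random_id":"abc123","type":"lxc"}'

Per the changelog above, this service is replaced by the new backend wired into misc/api.func (PR #11822).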
@@ -9,7 +9,7 @@ APP="Alpine-Grafana"
 var_tags="${var_tags:-alpine;monitoring}"
 var_cpu="${var_cpu:-1}"
 var_ram="${var_ram:-256}"
-var_disk="${var_disk:-1}"
+var_disk="${var_disk:-2}"
 var_os="${var_os:-alpine}"
 var_version="${var_version:-3.23}"
 var_unprivileged="${var_unprivileged:-1}"
@@ -28,6 +28,7 @@ function update_script() {
     exit
   fi
   msg_info "Updating Deluge"
+  ensure_dependencies python3-setuptools
  $STD apt update
   $STD pip3 install deluge[all] --upgrade
   msg_ok "Updated Deluge"
@@ -104,7 +104,7 @@ function update_script() {
     cd /opt/dispatcharr
     rm -rf .venv
     $STD uv venv --clear
-    $STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
+    $STD uv sync
     $STD uv pip install gunicorn gevent celery redis daphne
     msg_ok "Updated Dispatcharr Backend"

@@ -144,4 +144,4 @@ description
 msg_ok "Completed successfully!\n"
 echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
 echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9191${CL}"
@@ -35,13 +35,15 @@ function update_script() {
   msg_ok "Stopped Service"

   msg_info "Backing Up Configurations"
-  mv /opt/donetick/config/selfhosted.yml /opt/donetick/donetick.db /opt
+  mv /opt/donetick/config/selfhosted.yaml /opt/donetick/donetick.db /opt
   msg_ok "Backed Up Configurations"

   CLEAN_INSTALL=1 fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz"

   msg_info "Restoring Configurations"
-  mv /opt/selfhosted.yml /opt/donetick/config
+  mv /opt/selfhosted.yaml /opt/donetick/config
+  grep -q 'http://localhost"$' /opt/donetick/config/selfhosted.yaml || sed -i '/https:\/\/localhost"$/a\ - "http://localhost"' /opt/donetick/config/selfhosted.yaml
+  grep -q 'capacitor://localhost' /opt/donetick/config/selfhosted.yaml || sed -i '/http:\/\/localhost"$/a\ - "capacitor://localhost"' /opt/donetick/config/selfhosted.yaml
   mv /opt/donetick.db /opt/donetick
   msg_ok "Restored Configurations"
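The two grep/sed lines added above are an idempotent append: the sed insert only runs when grep finds the entry missing, so re-running the update never duplicates config lines. A minimal sketch of the same shape (file path and patterns hypothetical):

  CFG=/tmp/example.yaml
  grep -q '"http://localhost"' "$CFG" || sed -i '/"https:\/\/localhost"$/a\ - "http://localhost"' "$CFG"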
ct/drawio.sh (new file, 58 lines)

@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
+# Copyright (c) 2021-2026 community-scripts ORG
+# Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.drawio.com/
+
+APP="DrawIO"
+var_tags="${var_tags:-diagrams}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-4}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+  header_info
+  check_container_storage
+  check_container_resources
+  if [[ ! -f /var/lib/tomcat11/webapps/draw.war ]]; then
+    msg_error "No ${APP} Installation Found!"
+    exit
+  fi
+
+  if check_for_gh_release "drawio" "jgraph/drawio"; then
+    msg_info "Stopping service"
+    systemctl stop tomcat11
+    msg_ok "Service stopped"
+
+    msg_info "Updating Debian LXC"
+    $STD apt update
+    $STD apt upgrade -y
+    msg_ok "Updated Debian LXC"
+
+    USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
+
+    msg_info "Starting service"
+    systemctl start tomcat11
+    msg_ok "Service started"
+    msg_ok "Updated successfully!"
+  fi
+  exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080/draw${CL}"
@@ -9,7 +9,7 @@ APP="EMQX"
 var_tags="${var_tags:-mqtt}"
 var_cpu="${var_cpu:-2}"
 var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-4}"
+var_disk="${var_disk:-6}"
 var_os="${var_os:-debian}"
 var_version="${var_version:-13}"
 var_unprivileged="${var_unprivileged:-1}"
@@ -9,7 +9,7 @@ APP="Grafana"
 var_tags="${var_tags:-monitoring;visualization}"
 var_cpu="${var_cpu:-1}"
 var_ram="${var_ram:-512}"
-var_disk="${var_disk:-2}"
+var_disk="${var_disk:-4}"
 var_os="${var_os:-debian}"
 var_version="${var_version:-13}"
 var_unprivileged="${var_unprivileged:-1}"
ct/headers/drawio (new file, 6 lines)

@@ -0,0 +1,6 @@
+    ____                       ________
+   / __ \_________ __      __/  _/ __ \
+  / / / / ___/ __ `/ | /| / // // / / /
+ / /_/ / /  / /_/ /| |/ |/ // // /_/ /
+/_____/_/   \__,_/ |__/|__/___/\____/
+
@@ -105,7 +105,7 @@ EOF
     msg_ok "Image-processing libraries up to date"
   fi

-  RELEASE="2.5.5"
+  RELEASE="2.5.6"
   if check_for_gh_release "Immich" "immich-app/immich" "${RELEASE}"; then
     if [[ $(cat ~/.immich) > "2.5.1" ]]; then
       msg_info "Enabling Maintenance Mode"
@@ -46,7 +46,7 @@ function update_script() {
     msg_info "Restoring configuration & data"
     mv /opt/app.env /opt/jotty/.env
     [[ -d /opt/data ]] && mv /opt/data /opt/jotty/data
-    [[ -d /opt/jotty/config ]] && mv /opt/config/* /opt/jotty/config
+    [[ -d /opt/jotty/config ]] && cp -a /opt/config/* /opt/jotty/config && rm -rf /opt/config
     msg_ok "Restored configuration & data"

     msg_info "Starting Service"
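The switch from mv to cp -a plus rm -rf here likely avoids a classic failure: mv refuses to replace a non-empty directory that already exists at the destination, while cp -a merges contents into it. A small demonstration under assumed throwaway paths:

  mkdir -p /tmp/demo/src/sub /tmp/demo/dest/sub
  touch /tmp/demo/src/sub/new.conf /tmp/demo/dest/sub/old.conf
  mv /tmp/demo/src/sub /tmp/demo/dest 2>&1   # fails: "Directory not empty"
  cp -a /tmp/demo/src/. /tmp/demo/dest       # merges sub/ into the existing tree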
ct/kasm.sh (11 changed lines)

@@ -34,10 +34,19 @@ function update_script() {
   CURRENT_VERSION=$(readlink -f /opt/kasm/current | awk -F'/' '{print $4}')
   KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1)
   if [[ -z "$KASM_URL" ]]; then
+    SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1)
+    if [[ -n "$SERVICE_IMAGE_URL" ]]; then
+      KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
+      KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz"
+    fi
+  else
+    KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
+  fi
+
+  if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
     msg_error "Unable to detect latest Kasm release URL."
     exit 1
   fi
-  KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
   msg_info "Checked for new version"

   msg_info "Removing outdated docker-compose plugin"
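Both branches recover the version with the same sed idiom: capture the dotted version after the kasm_release_ prefix and replace the whole line with the capture group. An illustrative run against a made-up URL:

  echo "https://kasm-static-content.s3.amazonaws.com/kasm_release_1.2.3.tar.gz" \
    | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/'
  # prints: 1.2.3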
@@ -30,7 +30,7 @@ function update_script() {
   fi

   RELEASE="v5.0.2"
-  if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then
+  if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}"; then
     msg_info "Stopping services"
     systemctl stop opencloud opencloud-wopi
     msg_ok "Stopped services"
@@ -38,9 +38,21 @@ function update_script() {
     msg_info "Updating packages"
     $STD apt-get update
     $STD apt-get dist-upgrade -y
+    ensure_dependencies "inotify-tools"
     msg_ok "Updated packages"

-    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"
+    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "OpenCloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"
+
+    if ! grep -q 'POSIX_WATCH' /etc/opencloud/opencloud.env; then
+      sed -i '/^## External/i ## Uncomment below to enable PosixFS Collaborative Mode\
+## Increase inotify watch/instance limits on your PVE host:\
+### sysctl -w fs.inotify.max_user_watches=1048576\
+### sysctl -w fs.inotify.max_user_instances=1024\
+# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true\
+# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait\
+# STORAGE_USERS_POSIX_WATCH_FS=true\
+# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>' /etc/opencloud/opencloud.env
+    fi
+
     msg_info "Starting services"
     systemctl start opencloud opencloud-wopi
@@ -44,7 +44,7 @@ function update_script() {

     msg_info "Installing uv-based Open-WebUI"
     PYTHON_VERSION="3.12" setup_uv
-    $STD uv tool install --python 3.12 open-webui[all]
+    $STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
     msg_ok "Installed uv-based Open-WebUI"

     msg_info "Restoring data"

@@ -126,7 +126,7 @@ EOF

     msg_info "Updating Open WebUI via uv"
     PYTHON_VERSION="3.12" setup_uv
-    $STD uv tool upgrade --python 3.12 open-webui[all]
+    $STD uv tool install --force --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
     systemctl restart open-webui
     msg_ok "Updated Open WebUI"
     msg_ok "Updated successfully!"
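The --constraint flag used above takes a requirements-style constraints file, and <(echo "numba>=0.60") feeds it a one-line file via process substitution without touching disk. An equivalent form with an on-disk file (paths assumed, not from the source):

  printf 'numba>=0.60\n' > /tmp/constraints.txt
  uv tool install --python 3.12 --constraint /tmp/constraints.txt 'open-webui[all]'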
@@ -51,7 +51,7 @@ function update_script() {
     $STD npm run db:generate
     $STD npm run build
     $STD npm run build:cli
-    $STD npm run db:sqlite:push
+    $STD npm run db:push
     cp -R .next/standalone ./
     chmod +x ./dist/cli.mjs
     cp server/db/names.json ./dist/names.json
@@ -61,6 +61,12 @@ function update_script() {
     rm -rf "$BK"
     msg_ok "Restored data"

+    msg_ok "Migrate Database"
+    cd /opt/planka
+    $STD npm run db:upgrade
+    $STD npm run db:migrate
+    msg_ok "Migrated Database"
+
     msg_info "Starting Service"
     systemctl start planka
     msg_ok "Started Service"
@@ -28,16 +28,55 @@ function update_script() {
     exit
   fi

-  msg_info "Updating ${APP}"
-  $STD python3 -m venv /opt/radicale
-  source /opt/radicale/bin/activate
-  $STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
-  msg_ok "Updated ${APP}"
-
-  msg_info "Starting Service"
-  systemctl enable -q --now radicale
-  msg_ok "Started Service"
-  msg_ok "Updated successfully!"
+  if check_for_gh_release "Radicale" "Kozea/Radicale"; then
+    msg_info "Stopping service"
+    systemctl stop radicale
+    msg_ok "Stopped service"
+
+    msg_info "Backing up users file"
+    cp /opt/radicale/users /opt/radicale_users_backup
+    msg_ok "Backed up users file"
+
+    PYTHON_VERSION="3.13" setup_uv
+    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
+
+    msg_info "Restoring users file"
+    rm -f /opt/radicale/users
+    mv /opt/radicale_users_backup /opt/radicale/users
+    msg_ok "Restored users file"
+
+    if grep -q 'start.sh' /etc/systemd/system/radicale.service; then
+      sed -i -e '/^Description/i[Unit]' \
+        -e '\|^ExecStart|iWorkingDirectory=/opt/radicale' \
+        -e 's|^ExecStart=.*|ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config|' /etc/systemd/system/radicale.service
+      systemctl daemon-reload
+    fi
+    if [[ ! -f /etc/radicale/config ]]; then
+      msg_info "Migrating to config file (/etc/radicale/config)"
+      mkdir -p /etc/radicale
+      cat <<EOF >/etc/radicale/config
+[server]
+hosts = 0.0.0.0:5232
+
+[auth]
+type = htpasswd
+htpasswd_filename = /opt/radicale/users
+htpasswd_encryption = sha512
+
+[storage]
+type = multifilesystem
+filesystem_folder = /var/lib/radicale/collections
+
+[web]
+type = internal
+EOF
+      msg_ok "Migrated to config (/etc/radicale/config)"
+    fi
+    msg_info "Starting service"
+    systemctl start radicale
+    msg_ok "Started service"
+    msg_ok "Updated Successfully!"
+  fi
   exit
 }

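Worth noting in the service rewrite above: sed addresses normally use /pattern/, but a leading backslash lets you pick another delimiter, so '\|^ExecStart|i...' matches ^ExecStart without escaping the slashes in the replacement paths. A quick illustration (input made up):

  printf 'ExecStart=/old/bin/radicale\n' | sed '\|^ExecStart|iWorkingDirectory=/opt/radicale'
  # prints WorkingDirectory=/opt/radicale, then the original ExecStart line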
ct/slskd.sh (89 changed lines)

@@ -3,7 +3,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
 # Copyright (c) 2021-2026 community-scripts ORG
 # Author: vhsdream
 # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/slskd/slskd, https://soularr.net
+# Source: https://github.com/slskd/slskd, https://github.com/mrusse/soularr

 APP="slskd"
 var_tags="${var_tags:-arr;p2p}"

@@ -24,50 +24,65 @@ function update_script() {
   check_container_storage
   check_container_resources

-  if [[ ! -d /opt/slskd ]] || [[ ! -d /opt/soularr ]]; then
-    msg_error "No ${APP} Installation Found!"
+  if [[ ! -d /opt/slskd ]]; then
+    msg_error "No Slskd Installation Found!"
     exit
   fi

-  RELEASE=$(curl -s https://api.github.com/repos/slskd/slskd/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
-  if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
-    msg_info "Stopping Service"
-    systemctl stop slskd soularr.timer soularr.service
-    msg_info "Stopped Service"
+  if check_for_gh_release "Slskd" "slskd/slskd"; then
+    msg_info "Stopping Service(s)"
+    systemctl stop slskd
+    [[ -f /etc/systemd/system/soularr.service ]] && systemctl stop soularr.timer soularr.service
+    msg_ok "Stopped Service(s)"

-    msg_info "Updating $APP to v${RELEASE}"
-    tmp_file=$(mktemp)
-    curl -fsSL "https://github.com/slskd/slskd/releases/download/${RELEASE}/slskd-${RELEASE}-linux-x64.zip" -o $tmp_file
-    $STD unzip -oj $tmp_file slskd -d /opt/${APP}
-    echo "${RELEASE}" >/opt/${APP}_version.txt
-    msg_ok "Updated $APP to v${RELEASE}"
+    msg_info "Backing up config"
+    cp /opt/slskd/config/slskd.yml /opt/slskd.yml.bak
+    msg_ok "Backed up config"

-    msg_info "Starting Service"
+    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Slskd" "slskd/slskd" "prebuild" "latest" "/opt/slskd" "slskd-*-linux-x64.zip"
+
+    msg_info "Restoring config"
+    mv /opt/slskd.yml.bak /opt/slskd/config/slskd.yml
+    msg_ok "Restored config"
+
+    msg_info "Starting Service(s)"
     systemctl start slskd
-    msg_ok "Started Service"
-    rm -rf $tmp_file
-  else
-    msg_ok "No ${APP} update required. ${APP} is already at v${RELEASE}"
+    [[ -f /etc/systemd/system/soularr.service ]] && systemctl start soularr.timer
+    msg_ok "Started Service(s)"
+    msg_ok "Updated Slskd successfully!"
   fi
-  msg_info "Updating Soularr"
-  cp /opt/soularr/config.ini /opt/config.ini.bak
-  cp /opt/soularr/run.sh /opt/run.sh.bak
-  cd /tmp
-  rm -rf /opt/soularr
-  curl -fsSL -o main.zip https://github.com/mrusse/soularr/archive/refs/heads/main.zip
-  $STD unzip main.zip
-  mv soularr-main /opt/soularr
-  cd /opt/soularr
-  $STD pip install -r requirements.txt
-  mv /opt/config.ini.bak /opt/soularr/config.ini
-  mv /opt/run.sh.bak /opt/soularr/run.sh
-  rm -rf /tmp/main.zip
-  msg_ok "Updated soularr"
-
-  msg_info "Starting soularr timer"
-  systemctl start soularr.timer
-  msg_ok "Started soularr timer"
-  exit
+  [[ -d /opt/soularr ]] && if check_for_gh_release "Soularr" "mrusse/soularr"; then
+    if systemctl is-active soularr.timer >/dev/null; then
+      msg_info "Stopping Timer and Service"
+      systemctl stop soularr.timer soularr.service
+      msg_ok "Stopped Timer and Service"
+    fi
+
+    msg_info "Backing up Soularr config"
+    cp /opt/soularr/config.ini /opt/soularr_config.ini.bak
+    cp /opt/soularr/run.sh /opt/soularr_run.sh.bak
+    msg_ok "Backed up Soularr config"
+
+    PYTHON_VERSION="3.11" setup_uv
+    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Soularr" "mrusse/soularr" "tarball" "latest" "/opt/soularr"
+    msg_info "Updating Soularr"
+    cd /opt/soularr
+    $STD uv venv -c venv
+    $STD source venv/bin/activate
+    $STD uv pip install -r requirements.txt
+    deactivate
+    msg_ok "Updated Soularr"
+
+    msg_info "Restoring Soularr config"
+    mv /opt/soularr_config.ini.bak /opt/soularr/config.ini
+    mv /opt/soularr_run.sh.bak /opt/soularr/run.sh
+    msg_ok "Restored Soularr config"
+
+    msg_info "Starting Soularr Timer"
+    systemctl restart soularr.timer
+    msg_ok "Started Soularr Timer"
+    msg_ok "Updated Soularr successfully!"
+  fi
 }

 start
|||||||
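
Note: the rewrite above follows the update shape these refactors converge on: gate on a new release, stop services, back up mutable state, redeploy with a clean install, restore, restart. A minimal sketch of that shape, assuming the repo helpers behave as they are invoked here (check_for_gh_release succeeding only when a newer release exists, and fetch_and_deploy_gh_release replacing the target directory); "myapp" and the config path are placeholders:

if check_for_gh_release "MyApp" "owner/myapp"; then
  systemctl stop myapp
  cp /opt/myapp/config.yml /opt/myapp_config.yml.bak   # preserve mutable state before the wipe
  CLEAN_INSTALL=1 fetch_and_deploy_gh_release "MyApp" "owner/myapp" "tarball" "latest" "/opt/myapp"
  mv /opt/myapp_config.yml.bak /opt/myapp/config.yml   # restore after redeploy
  systemctl start myapp
fi
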
@@ -33,7 +33,15 @@ function update_script() {
     systemctl stop snowshare
     msg_ok "Stopped Service"

-    fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare" "tarball"
+    msg_info "Backing up uploads"
+    [ -d /opt/snowshare/uploads ] && cp -a /opt/snowshare/uploads /opt/.snowshare_uploads_backup
+    msg_ok "Uploads backed up"
+
+    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare" "tarball"
+
+    msg_info "Restoring uploads"
+    [ -d /opt/.snowshare_uploads_backup ] && rm -rf /opt/snowshare/uploads && cp -a /opt/.snowshare_uploads_backup /opt/snowshare/uploads
+    msg_ok "Uploads restored"

     msg_info "Updating Snowshare"
     cd /opt/snowshare
frontend/public/json/drawio.json (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "name": "Draw.IO",
+  "slug": "drawio",
+  "categories": [
+    12
+  ],
+  "date_created": "2026-02-11",
+  "type": "ct",
+  "updateable": true,
+  "privileged": false,
+  "interface_port": 8080,
+  "documentation": "https://www.drawio.com/doc/",
+  "website": "https://www.drawio.com/",
+  "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/draw-io.webp",
+  "config_path": "",
+  "description": "draw.io is a configurable diagramming and whiteboarding application, jointly owned and developed by draw.io Ltd (previously named JGraph) and draw.io AG.",
+  "install_methods": [
+    {
+      "type": "default",
+      "script": "ct/drawio.sh",
+      "resources": {
+        "cpu": 1,
+        "ram": 2048,
+        "hdd": 4,
+        "os": "Debian",
+        "version": "13"
+      }
+    }
+  ],
+  "default_credentials": {
+    "username": null,
+    "password": null
+  },
+  "notes": []
+}
@@ -21,7 +21,7 @@
       "resources": {
         "cpu": 2,
         "ram": 1024,
-        "hdd": 4,
+        "hdd": 6,
         "os": "debian",
         "version": "13"
       }
@@ -1,5 +1,5 @@
 {
-  "generated": "2026-02-10T12:19:02Z",
+  "generated": "2026-02-13T12:11:36Z",
   "versions": [
     {
       "slug": "2fauth",
@@ -15,6 +15,13 @@
       "pinned": false,
       "date": "2025-12-08T14:34:55Z"
     },
+    {
+      "slug": "adguardhome-sync",
+      "repo": "bakito/adguardhome-sync",
+      "version": "v0.8.2",
+      "pinned": false,
+      "date": "2025-10-24T17:13:47Z"
+    },
     {
       "slug": "adventurelog",
       "repo": "seanmorley15/adventurelog",
@@ -186,9 +193,9 @@
     {
       "slug": "cleanuparr",
       "repo": "Cleanuparr/Cleanuparr",
-      "version": "v2.5.1",
+      "version": "v2.6.1",
       "pinned": false,
-      "date": "2026-01-11T00:46:17Z"
+      "date": "2026-02-13T10:00:19Z"
     },
     {
       "slug": "cloudreve",
@@ -200,9 +207,9 @@
     {
       "slug": "comfyui",
       "repo": "comfyanonymous/ComfyUI",
-      "version": "v0.12.3",
+      "version": "v0.13.0",
       "pinned": false,
-      "date": "2026-02-05T07:04:07Z"
+      "date": "2026-02-10T20:27:38Z"
     },
     {
       "slug": "commafeed",
@@ -235,16 +242,16 @@
     {
       "slug": "cronicle",
       "repo": "jhuckaby/Cronicle",
-      "version": "v0.9.105",
+      "version": "v0.9.106",
       "pinned": false,
-      "date": "2026-02-05T18:16:11Z"
+      "date": "2026-02-11T17:11:46Z"
     },
     {
       "slug": "cryptpad",
       "repo": "cryptpad/cryptpad",
-      "version": "2025.9.0",
+      "version": "2026.2.0",
       "pinned": false,
-      "date": "2025-10-22T10:06:29Z"
+      "date": "2026-02-11T15:39:05Z"
     },
     {
       "slug": "dawarich",
@@ -263,9 +270,9 @@
     {
       "slug": "dispatcharr",
       "repo": "Dispatcharr/Dispatcharr",
-      "version": "v0.18.1",
+      "version": "v0.19.0",
       "pinned": false,
-      "date": "2026-01-27T17:09:11Z"
+      "date": "2026-02-10T21:18:10Z"
     },
     {
       "slug": "docmost",
@@ -277,23 +284,30 @@
     {
       "slug": "domain-locker",
       "repo": "Lissy93/domain-locker",
-      "version": "v0.1.2",
+      "version": "v0.1.3",
       "pinned": false,
-      "date": "2025-11-14T22:08:23Z"
+      "date": "2026-02-11T10:03:32Z"
     },
     {
       "slug": "domain-monitor",
       "repo": "Hosteroid/domain-monitor",
-      "version": "v1.1.2",
+      "version": "v1.1.3",
       "pinned": false,
-      "date": "2026-02-09T06:29:34Z"
+      "date": "2026-02-11T15:48:18Z"
     },
     {
       "slug": "donetick",
       "repo": "donetick/donetick",
-      "version": "v0.1.64",
+      "version": "v0.1.73",
       "pinned": false,
-      "date": "2025-10-03T05:18:24Z"
+      "date": "2026-02-12T23:42:30Z"
+    },
+    {
+      "slug": "drawio",
+      "repo": "jgraph/drawio",
+      "version": "v29.3.6",
+      "pinned": false,
+      "date": "2026-01-28T18:25:02Z"
     },
     {
       "slug": "duplicati",
@@ -319,9 +333,9 @@
     {
       "slug": "endurain",
       "repo": "endurain-project/endurain",
-      "version": "v0.17.3",
+      "version": "v0.17.4",
       "pinned": false,
-      "date": "2026-01-23T22:02:05Z"
+      "date": "2026-02-11T04:54:22Z"
     },
     {
       "slug": "ersatztv",
@@ -389,9 +403,9 @@
     {
       "slug": "ghostfolio",
       "repo": "ghostfolio/ghostfolio",
-      "version": "2.237.0",
+      "version": "2.238.0",
       "pinned": false,
-      "date": "2026-02-08T13:59:53Z"
+      "date": "2026-02-12T18:28:55Z"
     },
     {
       "slug": "gitea",
@@ -529,9 +543,16 @@
     {
       "slug": "huntarr",
       "repo": "plexguide/Huntarr.io",
-      "version": "9.2.3",
+      "version": "9.2.4.1",
       "pinned": false,
-      "date": "2026-02-07T04:44:20Z"
+      "date": "2026-02-12T22:17:47Z"
+    },
+    {
+      "slug": "immich-public-proxy",
+      "repo": "alangrainger/immich-public-proxy",
+      "version": "v1.15.1",
+      "pinned": false,
+      "date": "2026-01-26T08:04:27Z"
     },
     {
       "slug": "inspircd",
@@ -550,16 +571,23 @@
     {
       "slug": "invoiceninja",
       "repo": "invoiceninja/invoiceninja",
-      "version": "v5.12.55",
+      "version": "v5.12.59",
       "pinned": false,
-      "date": "2026-02-05T01:06:15Z"
+      "date": "2026-02-13T02:26:13Z"
     },
     {
       "slug": "jackett",
       "repo": "Jackett/Jackett",
-      "version": "v0.24.1089",
+      "version": "v0.24.1103",
       "pinned": false,
-      "date": "2026-02-10T05:55:59Z"
+      "date": "2026-02-13T05:53:23Z"
+    },
+    {
+      "slug": "jellystat",
+      "repo": "CyferShepard/Jellystat",
+      "version": "V1.1.8",
+      "pinned": false,
+      "date": "2026-02-08T08:15:00Z"
     },
     {
       "slug": "joplin-server",
@@ -571,9 +599,9 @@
     {
       "slug": "jotty",
       "repo": "fccview/jotty",
-      "version": "1.19.1",
+      "version": "1.20.0",
       "pinned": false,
-      "date": "2026-01-26T21:30:39Z"
+      "date": "2026-02-12T09:23:30Z"
     },
     {
       "slug": "kapowarr",
@@ -683,9 +711,9 @@
     {
       "slug": "libretranslate",
       "repo": "LibreTranslate/LibreTranslate",
-      "version": "v1.8.4",
+      "version": "v1.9.0",
       "pinned": false,
-      "date": "2026-02-02T17:45:16Z"
+      "date": "2026-02-10T19:05:48Z"
     },
     {
       "slug": "lidarr",
@@ -718,9 +746,9 @@
     {
       "slug": "lubelogger",
       "repo": "hargata/lubelog",
-      "version": "v1.5.9",
+      "version": "v1.6.0",
       "pinned": false,
-      "date": "2026-02-09T17:36:13Z"
+      "date": "2026-02-10T20:16:32Z"
     },
     {
       "slug": "mafl",
@@ -739,9 +767,9 @@
     {
       "slug": "mail-archiver",
       "repo": "s1t5/mail-archiver",
-      "version": "2601.3",
+      "version": "2602.1",
       "pinned": false,
-      "date": "2026-01-25T12:52:24Z"
+      "date": "2026-02-11T06:23:11Z"
     },
     {
       "slug": "managemydamnlife",
@@ -767,9 +795,9 @@
     {
       "slug": "mediamanager",
       "repo": "maxdorninger/MediaManager",
-      "version": "v1.12.2",
+      "version": "v1.12.3",
       "pinned": false,
-      "date": "2026-02-08T19:18:29Z"
+      "date": "2026-02-11T16:45:40Z"
     },
     {
       "slug": "mediamtx",
@@ -795,9 +823,9 @@
     {
       "slug": "metube",
       "repo": "alexta69/metube",
-      "version": "2026.02.08",
+      "version": "2026.02.12",
       "pinned": false,
-      "date": "2026-02-08T17:01:37Z"
+      "date": "2026-02-12T21:05:49Z"
     },
     {
       "slug": "miniflux",
@@ -837,9 +865,9 @@
     {
       "slug": "navidrome",
       "repo": "navidrome/navidrome",
-      "version": "v0.60.2",
+      "version": "v0.60.3",
       "pinned": false,
-      "date": "2026-02-07T19:42:33Z"
+      "date": "2026-02-10T23:55:04Z"
     },
     {
       "slug": "netbox",
@@ -848,6 +876,13 @@
       "pinned": false,
       "date": "2026-02-03T13:54:26Z"
     },
+    {
+      "slug": "nextcloud-exporter",
+      "repo": "xperimental/nextcloud-exporter",
+      "version": "v0.9.0",
+      "pinned": false,
+      "date": "2025-10-12T20:03:10Z"
+    },
     {
       "slug": "nginx-ui",
       "repo": "0xJacky/nginx-ui",
@@ -963,9 +998,9 @@
     {
       "slug": "pangolin",
       "repo": "fosrl/pangolin",
-      "version": "1.15.2",
+      "version": "1.15.4",
       "pinned": false,
-      "date": "2026-02-05T19:23:58Z"
+      "date": "2026-02-13T00:54:02Z"
     },
     {
       "slug": "paperless-ai",
@@ -991,9 +1026,9 @@
     {
       "slug": "patchmon",
       "repo": "PatchMon/PatchMon",
-      "version": "v1.3.7",
+      "version": "v1.4.0",
       "pinned": false,
-      "date": "2025-12-25T11:08:14Z"
+      "date": "2026-02-13T10:39:03Z"
     },
     {
       "slug": "paymenter",
@@ -1037,12 +1072,19 @@
       "pinned": false,
       "date": "2025-12-01T05:07:31Z"
     },
+    {
+      "slug": "pihole-exporter",
+      "repo": "eko/pihole-exporter",
+      "version": "v1.2.0",
+      "pinned": false,
+      "date": "2025-07-29T19:15:37Z"
+    },
     {
       "slug": "planka",
       "repo": "plankanban/planka",
-      "version": "v2.0.0-rc.4",
+      "version": "v2.0.0",
       "pinned": false,
-      "date": "2025-09-04T12:41:17Z"
+      "date": "2026-02-11T13:50:10Z"
     },
     {
       "slug": "plant-it",
@@ -1089,9 +1131,9 @@
     {
       "slug": "prometheus-alertmanager",
       "repo": "prometheus/alertmanager",
-      "version": "v0.31.0",
+      "version": "v0.31.1",
       "pinned": false,
-      "date": "2026-02-02T13:34:15Z"
+      "date": "2026-02-11T21:28:26Z"
     },
     {
       "slug": "prometheus-blackbox-exporter",
@@ -1100,6 +1142,13 @@
       "pinned": false,
       "date": "2025-12-06T13:32:18Z"
     },
+    {
+      "slug": "prometheus-paperless-ngx-exporter",
+      "repo": "hansmi/prometheus-paperless-exporter",
+      "version": "v0.0.9",
+      "pinned": false,
+      "date": "2025-12-08T20:37:45Z"
+    },
     {
       "slug": "prowlarr",
       "repo": "Prowlarr/Prowlarr",
@@ -1124,9 +1173,9 @@
     {
       "slug": "pulse",
       "repo": "rcourtman/Pulse",
-      "version": "v5.1.7",
+      "version": "v5.1.9",
       "pinned": false,
-      "date": "2026-02-10T09:59:55Z"
+      "date": "2026-02-11T15:34:40Z"
     },
     {
       "slug": "pve-scripts-local",
@@ -1142,6 +1191,13 @@
       "pinned": false,
       "date": "2025-11-19T23:54:34Z"
     },
+    {
+      "slug": "qbittorrent-exporter",
+      "repo": "martabal/qbittorrent-exporter",
+      "version": "v1.13.2",
+      "pinned": false,
+      "date": "2025-12-13T22:59:03Z"
+    },
     {
       "slug": "qdrant",
       "repo": "qdrant/qdrant",
@@ -1163,6 +1219,13 @@
       "pinned": false,
       "date": "2025-11-16T22:39:01Z"
     },
+    {
+      "slug": "radicale",
+      "repo": "Kozea/Radicale",
+      "version": "v3.6.0",
+      "pinned": false,
+      "date": "2026-01-10T06:56:46Z"
+    },
     {
       "slug": "rclone",
       "repo": "rclone/rclone",
@@ -1173,9 +1236,9 @@
     {
       "slug": "rdtclient",
       "repo": "rogerfar/rdt-client",
-      "version": "v2.0.119",
+      "version": "v2.0.120",
       "pinned": false,
-      "date": "2025-10-13T23:15:11Z"
+      "date": "2026-02-12T02:53:51Z"
     },
     {
       "slug": "reactive-resume",
@@ -1229,16 +1292,16 @@
     {
       "slug": "scanopy",
       "repo": "scanopy/scanopy",
-      "version": "v0.14.3",
+      "version": "v0.14.4",
       "pinned": false,
-      "date": "2026-02-04T01:41:01Z"
+      "date": "2026-02-10T03:57:28Z"
     },
     {
       "slug": "scraparr",
       "repo": "thecfu/scraparr",
-      "version": "v2.2.5",
+      "version": "v3.0.3",
       "pinned": false,
-      "date": "2025-10-07T12:34:31Z"
+      "date": "2026-02-12T14:20:56Z"
     },
     {
       "slug": "seelf",
@@ -1275,6 +1338,13 @@
       "pinned": false,
       "date": "2026-01-16T12:08:28Z"
     },
+    {
+      "slug": "slskd",
+      "repo": "slskd/slskd",
+      "version": "0.24.3",
+      "pinned": false,
+      "date": "2026-01-15T14:40:15Z"
+    },
     {
       "slug": "snipeit",
       "repo": "grokability/snipe-it",
@@ -1285,9 +1355,9 @@
     {
       "slug": "snowshare",
       "repo": "TuroYT/snowshare",
-      "version": "v1.3.3",
+      "version": "v1.3.5",
       "pinned": false,
-      "date": "2026-02-09T10:52:12Z"
+      "date": "2026-02-11T10:24:51Z"
     },
     {
       "slug": "sonarr",
@@ -1320,9 +1390,9 @@
     {
       "slug": "stirling-pdf",
       "repo": "Stirling-Tools/Stirling-PDF",
-      "version": "v2.4.5",
+      "version": "v2.4.6",
       "pinned": false,
-      "date": "2026-02-06T23:12:20Z"
+      "date": "2026-02-12T00:01:19Z"
     },
     {
       "slug": "streamlink-webui",
@@ -1369,9 +1439,9 @@
     {
       "slug": "termix",
       "repo": "Termix-SSH/Termix",
-      "version": "release-1.11.0-tag",
+      "version": "release-1.11.1-tag",
       "pinned": false,
-      "date": "2026-01-25T02:09:52Z"
+      "date": "2026-02-13T04:49:16Z"
     },
     {
       "slug": "the-lounge",
@@ -1397,9 +1467,9 @@
     {
       "slug": "tianji",
       "repo": "msgbyte/tianji",
-      "version": "v1.31.10",
+      "version": "v1.31.12",
       "pinned": false,
-      "date": "2026-02-04T17:21:04Z"
+      "date": "2026-02-12T19:06:14Z"
     },
     {
       "slug": "traccar",
@@ -1411,9 +1481,9 @@
     {
       "slug": "tracearr",
       "repo": "connorgallopo/Tracearr",
-      "version": "v1.4.12",
+      "version": "v1.4.17",
       "pinned": false,
-      "date": "2026-01-28T23:29:37Z"
+      "date": "2026-02-11T01:33:21Z"
     },
     {
       "slug": "tracktor",
@@ -1425,9 +1495,9 @@
     {
       "slug": "traefik",
       "repo": "traefik/traefik",
-      "version": "v3.6.7",
+      "version": "v3.6.8",
       "pinned": false,
-      "date": "2026-01-14T14:11:45Z"
+      "date": "2026-02-11T16:44:37Z"
     },
     {
       "slug": "trilium",
@@ -1439,9 +1509,9 @@
     {
       "slug": "trip",
       "repo": "itskovacs/TRIP",
-      "version": "1.39.0",
+      "version": "1.40.0",
       "pinned": false,
-      "date": "2026-02-07T16:59:51Z"
+      "date": "2026-02-10T20:12:53Z"
     },
     {
       "slug": "tududi",
@@ -1488,9 +1558,9 @@
     {
       "slug": "upsnap",
       "repo": "seriousm4x/UpSnap",
-      "version": "5.2.7",
+      "version": "5.2.8",
       "pinned": false,
-      "date": "2026-01-07T23:48:00Z"
+      "date": "2026-02-13T00:02:37Z"
     },
     {
       "slug": "uptimekuma",
@@ -1502,9 +1572,9 @@
     {
       "slug": "vaultwarden",
       "repo": "dani-garcia/vaultwarden",
-      "version": "1.35.2",
+      "version": "1.35.3",
       "pinned": false,
-      "date": "2026-01-09T18:37:04Z"
+      "date": "2026-02-10T20:37:03Z"
     },
     {
       "slug": "victoriametrics",
@@ -1530,9 +1600,9 @@
     {
       "slug": "wallos",
       "repo": "ellite/Wallos",
-      "version": "v4.6.0",
+      "version": "v4.6.1",
       "pinned": false,
-      "date": "2025-12-20T15:57:51Z"
+      "date": "2026-02-10T21:06:46Z"
     },
     {
       "slug": "wanderer",
@@ -1565,9 +1635,9 @@
     {
       "slug": "wavelog",
       "repo": "wavelog/wavelog",
-      "version": "2.2.2",
+      "version": "2.3",
       "pinned": false,
-      "date": "2025-12-31T16:53:34Z"
+      "date": "2026-02-11T15:46:40Z"
     },
     {
       "slug": "wealthfolio",
@@ -1593,9 +1663,9 @@
     {
       "slug": "wikijs",
       "repo": "requarks/wiki",
-      "version": "v2.5.311",
+      "version": "v2.5.312",
       "pinned": false,
-      "date": "2026-01-08T09:50:00Z"
+      "date": "2026-02-12T02:45:22Z"
     },
     {
       "slug": "wishlist",
@@ -1642,9 +1712,9 @@
     {
       "slug": "zipline",
       "repo": "diced/zipline",
-      "version": "v4.4.1",
+      "version": "v4.4.2",
       "pinned": false,
-      "date": "2026-01-20T01:29:01Z"
+      "date": "2026-02-11T04:58:54Z"
     },
     {
       "slug": "zitadel",
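
Note: the versions file above is plain JSON, so a current or pinned version can be looked up with jq; a hypothetical one-liner (the slug is a placeholder):

jq -r '.versions[] | select(.slug == "slskd") | .version' frontend/public/json/versions.json
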
@@ -21,7 +21,7 @@
       "resources": {
         "cpu": 1,
         "ram": 512,
-        "hdd": 2,
+        "hdd": 4,
         "os": "debian",
         "version": "13"
       }
@@ -32,7 +32,7 @@
       "resources": {
         "cpu": 1,
         "ram": 256,
-        "hdd": 1,
+        "hdd": 2,
         "os": "alpine",
         "version": "3.23"
       }
@@ -33,7 +33,7 @@
   },
   "notes": [
     {
-      "text": "Kutt needs so be served with an SSL certificate for its login to work. During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse porxy to port 3000).",
+      "text": "Kutt needs so be served with an SSL certificate for its login to work. During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse proxy to port 3000).",
       "type": "info"
     }
   ]
@@ -12,7 +12,7 @@
   "documentation": "https://radicale.org/master.html#documentation-1",
   "website": "https://radicale.org/",
   "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp",
-  "config_path": "/etc/radicale/config or ~/.config/radicale/config",
+  "config_path": "/etc/radicale/config",
   "description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)",
   "install_methods": [
     {
@@ -1,5 +1,5 @@
 {
-  "name": "slskd",
+  "name": "Slskd",
   "slug": "slskd",
   "categories": [
     11
@@ -35,10 +35,6 @@
     {
       "text": "See /opt/slskd/config/slskd.yml to add your Soulseek credentials",
       "type": "info"
-    },
-    {
-      "text": "This LXC includes Soularr; it needs to be configured (/opt/soularr/config.ini) before it will work",
-      "type": "info"
     }
   ]
 }
@@ -10,7 +10,7 @@
   "privileged": false,
   "interface_port": 3000,
   "documentation": "https://github.com/TuroYT/snowshare",
-  "config_path": "/opt/snowshare/.env",
+  "config_path": "/opt/snowshare.env",
   "website": "https://github.com/TuroYT/snowshare",
   "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/snowshare.png",
   "description": "A modern, secure file and link sharing platform built with Next.js, Prisma, and NextAuth. Share URLs, code snippets, and files with customizable expiration, privacy, and QR codes.",
@@ -32,6 +32,10 @@
     "password": null
   },
   "notes": [
+    {
+      "text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.",
+      "type": "warning"
+    },
     {
       "text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.",
       "type": "info"
@@ -16,7 +16,8 @@ update_os
 msg_info "Installing Dependencies"
 $STD apt install -y \
   python3-pip \
-  python3-libtorrent
+  python3-libtorrent \
+  python3-setuptools
 msg_ok "Installed Dependencies"

 msg_info "Installing Deluge"
@@ -37,7 +37,7 @@ fetch_and_deploy_gh_release "dispatcharr" "Dispatcharr/Dispatcharr" "tarball"
 msg_info "Installing Python Dependencies with uv"
 cd /opt/dispatcharr
 $STD uv venv --clear
-$STD uv pip install -r requirements.txt --index-strategy unsafe-best-match
+$STD uv sync
 $STD uv pip install gunicorn gevent celery redis daphne
 msg_ok "Installed Python Dependencies"
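
Note: unlike a flat requirements.txt install, `uv sync` resolves the environment from the project's own metadata (pyproject.toml and, when present, uv.lock), which is presumably why the unsafe-best-match index strategy is no longer needed. A quick way to inspect the result (sketch):

cd /opt/dispatcharr
uv sync        # resolve and install from the project's lockfile/metadata
uv pip list    # inspect what landed in the project venv
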
install/drawio-install.sh (new file, 25 lines)
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2026 community-scripts ORG
+# Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.drawio.com/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+setup_hwaccel
+
+msg_info "Installing Dependencies"
+$STD apt install -y tomcat11
+msg_ok "Installed Dependencies"
+
+USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
+
+motd_ssh
+customize
+cleanup_lxc
@@ -31,8 +31,10 @@ setup_deb822_repo "matrix-org" \
   "main"
 echo "matrix-synapse-py3 matrix-synapse/server-name string $servername" | debconf-set-selections
 echo "matrix-synapse-py3 matrix-synapse/report-stats boolean false" | debconf-set-selections
+echo "exit 101" >/usr/sbin/policy-rc.d
+chmod +x /usr/sbin/policy-rc.d
 $STD apt install matrix-synapse-py3 -y
-systemctl stop matrix-synapse
+rm -f /usr/sbin/policy-rc.d
 sed -i 's/127.0.0.1/0.0.0.0/g' /etc/matrix-synapse/homeserver.yaml
 sed -i 's/'\''::1'\'', //g' /etc/matrix-synapse/homeserver.yaml
 SECRET=$(openssl rand -hex 32)
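
Note: writing `exit 101` to /usr/sbin/policy-rc.d uses Debian's invoke-rc.d policy hook: while that file exists and exits 101, maintainer scripts are denied permission to start services, so matrix-synapse is never started mid-install; removing the file afterwards restores normal behavior, replacing the earlier stop-after-start. A trap makes the cleanup robust even if the install step fails (a sketch, not part of the script above):

echo "exit 101" >/usr/sbin/policy-rc.d
chmod +x /usr/sbin/policy-rc.d
trap 'rm -f /usr/sbin/policy-rc.d' EXIT   # remove the policy hook even on error
apt install -y matrix-synapse-py3
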
@@ -38,6 +38,18 @@ rm -f "$DEB_FILE"
 echo "$LATEST_VERSION" >~/.emqx
 msg_ok "Installed EMQX"

+read -r -p "${TAB3}Would you like to disable the EMQX MQ feature? (reduces disk/CPU usage) <y/N> " prompt
+if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
+  msg_info "Disabling EMQX MQ feature"
+  mkdir -p /etc/emqx
+  if ! grep -q "^mq.enable" /etc/emqx/emqx.conf 2>/dev/null; then
+    echo "mq.enable = false" >>/etc/emqx/emqx.conf
+  else
+    sed -i 's/^mq.enable.*/mq.enable = false/' /etc/emqx/emqx.conf
+  fi
+  msg_ok "Disabled EMQX MQ feature"
+fi
+
 msg_info "Starting EMQX service"
 $STD systemctl enable -q --now emqx
 msg_ok "Enabled EMQX service"
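
Note: the grep-then-append-or-sed branch above is the usual idempotent way to set a key=value line: append the key if absent, rewrite it in place if present, so the script can be re-run safely. Generalized as a helper (a sketch, not part of the installer):

set_conf_key() { # usage: set_conf_key FILE KEY VALUE
  local file=$1 key=$2 value=$3
  if grep -q "^${key}" "$file" 2>/dev/null; then
    sed -i "s|^${key}.*|${key} = ${value}|" "$file"   # key present: rewrite the line
  else
    echo "${key} = ${value}" >>"$file"                # key absent: append it
  fi
}
# e.g. set_conf_key /etc/emqx/emqx.conf mq.enable false
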
@@ -289,7 +289,7 @@ ML_DIR="${APP_DIR}/machine-learning"
 GEO_DIR="${INSTALL_DIR}/geodata"
 mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache}

-fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.5" "$SRC_DIR"
+fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.6" "$SRC_DIR"
 PNPM_VERSION="$(jq -r '.packageManager | split("@")[1]' ${SRC_DIR}/package.json)"
 NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs
@@ -20,10 +20,19 @@ msg_ok "Installed Docker"
 msg_info "Detecting latest Kasm Workspaces release"
 KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1)
 if [[ -z "$KASM_URL" ]]; then
+  SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1)
+  if [[ -n "$SERVICE_IMAGE_URL" ]]; then
+    KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
+    KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz"
+  fi
+else
+  KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
+fi
+
+if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then
   msg_error "Unable to detect latest Kasm release URL."
   exit 1
 fi
-KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/')
 msg_ok "Detected Kasm Workspaces version $KASM_VERSION"

 msg_warn "WARNING: This script will run an external installer from a third-party source (https://www.kasmweb.com/)."
@@ -37,18 +37,13 @@ PYTHON_VERSION="3.12" setup_uv
 fetch_and_deploy_gh_release "libretranslate" "LibreTranslate/LibreTranslate" "tarball"

 msg_info "Setup LibreTranslate (Patience)"
-TORCH_VERSION=$(grep -Eo '"torch ==[0-9]+\.[0-9]+\.[0-9]+' /opt/libretranslate/pyproject.toml |
-  tail -n1 | sed 's/.*==//')
-if [[ -z "$TORCH_VERSION" ]]; then
-  TORCH_VERSION="2.5.0"
-fi
 cd /opt/libretranslate
 $STD uv venv --clear .venv --python 3.12
 $STD source .venv/bin/activate
-$STD uv pip install --upgrade pip setuptools
+$STD uv pip install --upgrade pip
+$STD uv pip install "setuptools<81"
 $STD uv pip install Babel==2.12.1
 $STD .venv/bin/python scripts/compile_locales.py
-$STD uv pip install "torch==${TORCH_VERSION}" --extra-index-url https://download.pytorch.org/whl/cpu
 $STD uv pip install "numpy<2"
 $STD uv pip install .
 $STD uv pip install libretranslate
@@ -38,6 +38,10 @@ for server in "${servers[@]}"; do
   fi
 done

+msg_info "Installing dependencies"
+$STD apt install -y inotify-tools
+msg_ok "Installed dependencies"
+
 msg_info "Installing Collabora Online"
 curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg
 cat <<EOF >/etc/apt/sources.list.d/colloboraonline.sources
@@ -148,8 +152,15 @@ COLLABORATION_JWT_SECRET=
 # FRONTEND_FULL_TEXT_SEARCH_ENABLED=true
 # SEARCH_EXTRACTOR_TIKA_TIKA_URL=<your-tika-url>

-## External storage test - Only NFS v4.2+ is supported
-## User files
+## Uncomment below to enable PosixFS Collaborative Mode
+## Increase inotify watch/instance limits on your PVE host:
+### sysctl -w fs.inotify.max_user_watches=1048576
+### sysctl -w fs.inotify.max_user_instances=1024
+# STORAGE_USERS_POSIX_ENABLE_COLLABORATION=true
+# STORAGE_USERS_POSIX_WATCH_TYPE=inotifywait
+# STORAGE_USERS_POSIX_WATCH_FS=true
+# STORAGE_USERS_POSIX_WATCH_PATH=<path-to-storage-or-bind-mount>
+## User files location - experimental - use at your own risk! - ZFS, NFS v4.2+ supported - CIFS/SMB not supported
 # STORAGE_USERS_POSIX_ROOT=<path-to-your-bind_mount>
 EOF
@@ -24,7 +24,7 @@ setup_hwaccel
 PYTHON_VERSION="3.12" setup_uv

 msg_info "Installing Open WebUI"
-$STD uv tool install --python 3.12 open-webui[all]
+$STD uv tool install --python 3.12 --constraint <(echo "numba>=0.60") open-webui[all]
 msg_ok "Installed Open WebUI"

 read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt
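
Note: `--constraint <(echo "numba>=0.60")` feeds uv an ad-hoc constraints file via bash process substitution; a constraint floors or caps a transitive dependency without adding it as a direct requirement. The same pin expressed with an ordinary file (sketch only; the path is a placeholder):

echo "numba>=0.60" >/tmp/constraints.txt
uv tool install --python 3.12 --constraint /tmp/constraints.txt open-webui[all]
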
@@ -178,7 +178,7 @@ http:
     servers:
       - url: "http://$LOCAL_IP:3000"
 EOF
-$STD npm run db:sqlite:push
+$STD npm run db:push

 . /etc/os-release
 if [ "$VERSION_CODENAME" = "trixie" ]; then
@@ -14,42 +14,51 @@ network_check
 update_os

 msg_info "Installing Dependencies"
-$STD apt install -y \
-  apache2-utils \
-  python3-pip \
-  python3-venv
+$STD apt install -y apache2-utils
 msg_ok "Installed Dependencies"

+PYTHON_VERSION="3.13" setup_uv
+fetch_and_deploy_gh_release "Radicale" "Kozea/Radicale" "tarball" "latest" "/opt/radicale"
+
 msg_info "Setting up Radicale"
-python3 -m venv /opt/radicale
-source /opt/radicale/bin/activate
-$STD python3 -m pip install --upgrade https://github.com/Kozea/Radicale/archive/master.tar.gz
+cd /opt/radicale
 RNDPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
-$STD htpasswd -c -b -5 /opt/radicale/users admin $RNDPASS
+$STD htpasswd -c -b -5 /opt/radicale/users admin "$RNDPASS"
 {
   echo "Radicale Credentials"
   echo "Admin User: admin"
   echo "Admin Password: $RNDPASS"
 } >>~/radicale.creds
-msg_ok "Done setting up Radicale"

-msg_info "Setup Service"
-
-cat <<EOF >/opt/radicale/start.sh
-#!/usr/bin/env bash
-source /opt/radicale/bin/activate
-python3 -m radicale --storage-filesystem-folder=/var/lib/radicale/collections --hosts 0.0.0.0:5232 --auth-type htpasswd --auth-htpasswd-filename /opt/radicale/users --auth-htpasswd-encryption sha512
+mkdir -p /etc/radicale
+cat <<EOF >/etc/radicale/config
+[server]
+hosts = 0.0.0.0:5232
+
+[auth]
+type = htpasswd
+htpasswd_filename = /opt/radicale/users
+htpasswd_encryption = sha512
+
+[storage]
+type = multifilesystem
+filesystem_folder = /var/lib/radicale/collections
+
+[web]
+type = internal
 EOF
+msg_ok "Set up Radicale"

-chmod +x /opt/radicale/start.sh
+msg_info "Creating Service"
 cat <<EOF >/etc/systemd/system/radicale.service
+[Unit]
 Description=A simple CalDAV (calendar) and CardDAV (contact) server
 After=network.target
 Requires=network.target

 [Service]
-ExecStart=/opt/radicale/start.sh
+WorkingDirectory=/opt/radicale
+ExecStart=/usr/local/bin/uv run -m radicale --config /etc/radicale/config
 Restart=on-failure
 # User=radicale
 # Deny other users access to the calendar data
@@ -3,7 +3,7 @@
 # Copyright (c) 2021-2026 community-scripts ORG
 # Author: vhsdream
 # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/slskd/slskd/, https://soularr.net
+# Source: https://github.com/slskd/slskd/, https://github.com/mrusse/soularr

 source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
 color
@@ -13,71 +13,71 @@ setting_up_container
 network_check
 update_os

-msg_info "Installing Dependencies"
-$STD apt install -y \
-  python3-pip
-msg_ok "Installed Dependencies"
+fetch_and_deploy_gh_release "Slskd" "slskd/slskd" "prebuild" "latest" "/opt/slskd" "slskd-*-linux-x64.zip"

-msg_info "Setup ${APPLICATION}"
-tmp_file=$(mktemp)
-RELEASE=$(curl -s https://api.github.com/repos/slskd/slskd/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
-curl -fsSL "https://github.com/slskd/slskd/releases/download/${RELEASE}/slskd-${RELEASE}-linux-x64.zip" -o $tmp_file
-$STD unzip $tmp_file -d /opt/${APPLICATION}
-echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
+msg_info "Configuring Slskd"
 JWT_KEY=$(openssl rand -base64 44)
 SLSKD_API_KEY=$(openssl rand -base64 44)
-cp /opt/${APPLICATION}/config/slskd.example.yml /opt/${APPLICATION}/config/slskd.yml
+cp /opt/slskd/config/slskd.example.yml /opt/slskd/config/slskd.yml
 sed -i \
-  -e "\|web:|,\|cidr|s|^#||" \
-  -e "\|https:|,\|5031|s|false|true|" \
+  -e '/web:/,/cidr/s/^# //' \
+  -e '/https:/,/port: 5031/s/false/true/' \
+  -e '/port: 5030/,/socket/s/,.*$//' \
+  -e '/content_path:/,/authentication/s/false/true/' \
   -e "\|api_keys|,\|cidr|s|<some.*$|$SLSKD_API_KEY|; \
     s|role: readonly|role: readwrite|; \
     s|0.0.0.0/0,::/0|& # Replace this with your subnet|" \
-  -e "\|soulseek|,\|write_queue|s|^#||" \
   -e "\|jwt:|,\|ttl|s|key: ~|key: $JWT_KEY|" \
-  -e "s|^ picture|# picture|" \
-  /opt/${APPLICATION}/config/slskd.yml
-msg_ok "Setup ${APPLICATION}"
+  -e '/soulseek/,/write_queue/s/^# //' \
+  -e 's/^.*picture/#&/' /opt/slskd/config/slskd.yml
+msg_ok "Configured Slskd"

-msg_info "Installing Soularr"
-rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
-cd /tmp
-curl -fsSL -o main.zip https://github.com/mrusse/soularr/archive/refs/heads/main.zip
-$STD unzip main.zip
-mv soularr-main /opt/soularr
-cd /opt/soularr
-$STD pip install -r requirements.txt
+read -rp "${TAB3}Do you want to install Soularr? y/N " soularr
+if [[ ${soularr,,} =~ ^(y|yes)$ ]]; then
+  PYTHON_VERSION="3.11" setup_uv
+  fetch_and_deploy_gh_release "Soularr" "mrusse/soularr" "tarball" "latest" "/opt/soularr"
+  cd /opt/soularr
+  $STD uv venv venv
+  $STD source venv/bin/activate
+  $STD uv pip install -r requirements.txt
 sed -i \
   -e "\|[Slskd]|,\|host_url|s|yourslskdapikeygoeshere|$SLSKD_API_KEY|" \
   -e "/host_url/s/slskd/localhost/" \
   /opt/soularr/config.ini
-sed -i \
-  -e "/#This\|#Default\|INTERVAL/{N;d;}" \
-  -e "/while\|#Pass/d" \
-  -e "\|python|s|app|opt/soularr|; s|python|python3|" \
-  -e "/dt/,+2d" \
-  /opt/soularr/run.sh
-sed -i -E "/(soularr.py)/s/.{5}$//; /if/,/fi/s/.{4}//" /opt/soularr/run.sh
-chmod +x /opt/soularr/run.sh
-msg_ok "Installed Soularr"
+  cat <<EOF >/opt/soularr/run.sh
+#!/usr/bin/env bash

-msg_info "Creating Services"
-cat <<EOF >/etc/systemd/system/${APPLICATION}.service
+if ps aux | grep "[s]oularr.py" >/dev/null; then
+  echo "Soularr is already running. Exiting..."
+  exit 1
+else
+  source /opt/soularr/venv/bin/activate
+  uv run python3 -u /opt/soularr/soularr.py --config-dir /opt/soularr
+fi
+EOF
+  chmod +x /opt/soularr/run.sh
+  deactivate
+  msg_ok "Installed Soularr"
+fi
+
+msg_info "Creating Service"
+cat <<EOF >/etc/systemd/system/slskd.service
 [Unit]
-Description=${APPLICATION} Service
+Description=Slskd Service
 After=network.target
 Wants=network.target

 [Service]
-WorkingDirectory=/opt/${APPLICATION}
-ExecStart=/opt/${APPLICATION}/slskd --config /opt/${APPLICATION}/config/slskd.yml
+WorkingDirectory=/opt/slskd
+ExecStart=/opt/slskd/slskd --config /opt/slskd/config/slskd.yml
 Restart=always

 [Install]
 WantedBy=multi-user.target
 EOF

-cat <<EOF >/etc/systemd/system/soularr.timer
+if [[ -d /opt/soularr ]]; then
+  cat <<EOF >/etc/systemd/system/soularr.timer
 [Unit]
 Description=Soularr service timer
 RefuseManualStart=no
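
Note: the `grep "[s]oularr.py"` pattern in the generated run.sh is the standard self-exclusion trick: the grep process's own command line contains the literal string "[s]oularr.py", which does not match the pattern, so only a genuinely running soularr.py is detected. An equivalent check with pgrep (a sketch):

if pgrep -f "soularr.py" >/dev/null; then
  echo "Soularr is already running. Exiting..."
  exit 1
fi
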
@@ -85,15 +85,15 @@ RefuseManualStop=no

 [Timer]
 Persistent=true
-# run every 5 minutes
-OnCalendar=*-*-* *:0/5:00
+# run every 10 minutes
+OnCalendar=*-*-* *:0/10:00
 Unit=soularr.service

 [Install]
 WantedBy=timers.target
 EOF

 cat <<EOF >/etc/systemd/system/soularr.service
 [Unit]
 Description=Soularr service
 After=network.target slskd.service
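
Note: `OnCalendar=*-*-* *:0/10:00` fires at minutes 0, 10, 20, 30, 40 and 50 of every hour. Such expressions can be checked before deploying:

systemd-analyze calendar "*-*-* *:0/10:00"   # prints the normalized form and next elapse time
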
@@ -106,10 +106,9 @@ ExecStart=/bin/bash -c /opt/soularr/run.sh
 [Install]
 WantedBy=multi-user.target
 EOF
-systemctl enable -q --now ${APPLICATION}
-systemctl enable -q soularr.timer
-rm -rf $tmp_file
-rm -rf /tmp/main.zip
+msg_warn "Add your Lidarr API key to Soularr in '/opt/soularr/config.ini', then run 'systemctl enable --now soularr.timer'"
+fi
+systemctl enable -q --now slskd
 msg_ok "Created Services"

 motd_ssh
@@ -15,16 +15,18 @@ update_os

 msg_info "Installing Dependencies"
 $STD apt install -y apt-transport-https
+curl -fsSL "https://dl.ui.com/unifi/unifi-repo.gpg" -o "/usr/share/keyrings/unifi-repo.gpg"
+cat <<EOF | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.sources >/dev/null
+Types: deb
+URIs: https://www.ui.com/downloads/unifi/debian
+Suites: stable
+Components: ubiquiti
+Architectures: amd64
+Signed-By: /usr/share/keyrings/unifi-repo.gpg
+EOF
+$STD apt update
 msg_ok "Installed Dependencies"

-setup_deb822_repo \
-  "unifi" \
-  "https://dl.ui.com/unifi/unifi-repo.gpg" \
-  "https://www.ui.com/downloads/unifi/debian" \
-  "stable" \
-  "ubiquiti" \
-  "amd64"
-
 JAVA_VERSION="21" setup_java

 if lscpu | grep -q 'avx'; then
misc/api.func (1111 lines): file diff suppressed because it is too large
misc/build.func (418 lines)
@@ -38,15 +38,16 @@
 # - Captures app-declared resource defaults (CPU, RAM, Disk)
 # ------------------------------------------------------------------------------
 variables() {
   NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
   var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
   INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern.
   PVEHOST_NAME=$(hostname) # gets the Proxmox Hostname and sets it to Uppercase
   DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
   METHOD="default" # sets the METHOD variable to "default", used for the API call.
   RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
   SESSION_ID="${RANDOM_UUID:0:8}" # Short session ID (first 8 chars of UUID) for log files
   BUILD_LOG="/tmp/create-lxc-${SESSION_ID}.log" # Host-side container creation log
+  combined_log="/tmp/install-${SESSION_ID}-combined.log" # Combined log (build + install) for failed installations
   CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"

   # Parse dev_mode early
@@ -385,7 +386,7 @@ validate_hostname() {

   # Split by dots and validate each label
   local IFS='.'
-  read -ra labels <<< "$hostname"
+  read -ra labels <<<"$hostname"
   for label in "${labels[@]}"; do
     # Each label: 1-63 chars, alphanumeric, hyphens allowed (not at start/end)
     if [[ -z "$label" ]] || [[ ${#label} -gt 63 ]]; then
@@ -489,7 +490,7 @@ validate_ipv6_address() {
   # Check that no segment exceeds 4 hex chars
   local IFS=':'
   local -a segments
-  read -ra segments <<< "$addr"
+  read -ra segments <<<"$addr"
   for seg in "${segments[@]}"; do
     if [[ ${#seg} -gt 4 ]]; then
       return 1
@@ -539,14 +540,14 @@ validate_gateway_in_subnet() {

   # Convert IPs to integers
   local IFS='.'
-  read -r i1 i2 i3 i4 <<< "$ip"
-  read -r g1 g2 g3 g4 <<< "$gateway"
+  read -r i1 i2 i3 i4 <<<"$ip"
+  read -r g1 g2 g3 g4 <<<"$gateway"

-  local ip_int=$(( (i1 << 24) + (i2 << 16) + (i3 << 8) + i4 ))
-  local gw_int=$(( (g1 << 24) + (g2 << 16) + (g3 << 8) + g4 ))
+  local ip_int=$(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4))
+  local gw_int=$(((g1 << 24) + (g2 << 16) + (g3 << 8) + g4))

   # Check if both are in same network
-  if (( (ip_int & mask) != (gw_int & mask) )); then
+  if (((ip_int & mask) != (gw_int & mask))); then
     return 1
   fi

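
As an aside, the bit arithmetic in this hunk is easier to follow in isolation. A minimal standalone sketch of the same check, with invented example values (not part of the diff):

    ip="192.168.1.50" gateway="192.168.1.1" cidr=24
    IFS='.' read -r i1 i2 i3 i4 <<<"$ip"
    IFS='.' read -r g1 g2 g3 g4 <<<"$gateway"
    ip_int=$(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4)) # 192.168.1.50 -> 3232235826
    gw_int=$(((g1 << 24) + (g2 << 16) + (g3 << 8) + g4)) # 192.168.1.1  -> 3232235777
    mask=$(((0xFFFFFFFF << (32 - cidr)) & 0xFFFFFFFF))   # /24 -> 0xFFFFFF00
    (((ip_int & mask) == (gw_int & mask))) && echo "gateway is in the subnet"
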
@@ -1079,117 +1080,117 @@ load_vars_file() {
       # Validate values before setting (skip empty values - they use defaults)
       if [[ -n "$var_val" ]]; then
         case "$var_key" in
         var_mac)
           if ! validate_mac_address "$var_val"; then
             msg_warn "Invalid MAC address '$var_val' in $file, ignoring"
+            continue
+          fi
+          ;;
+        var_vlan)
+          if ! validate_vlan_tag "$var_val"; then
+            msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring"
+            continue
+          fi
+          ;;
+        var_mtu)
+          if ! validate_mtu "$var_val"; then
+            msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring"
+            continue
+          fi
+          ;;
+        var_tags)
+          if ! validate_tags "$var_val"; then
+            msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring"
+            continue
+          fi
+          ;;
+        var_timezone)
+          if ! validate_timezone "$var_val"; then
+            msg_warn "Invalid timezone '$var_val' in $file, ignoring"
+            continue
+          fi
+          ;;
+        var_brg)
+          if ! validate_bridge "$var_val"; then
+            msg_warn "Bridge '$var_val' not found in $file, ignoring"
+            continue
+          fi
+          ;;
+        var_gateway)
+          if ! validate_gateway_ip "$var_val"; then
+            msg_warn "Invalid gateway IP '$var_val' in $file, ignoring"
+            continue
+          fi
+          ;;
+        var_hostname)
+          if ! validate_hostname "$var_val"; then
+            msg_warn "Invalid hostname '$var_val' in $file, ignoring"
+            continue
+          fi
+          ;;
+        var_cpu)
+          if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then
+            msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring"
+            continue
+          fi
+          ;;
+        var_ram)
+          if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then
+            msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring"
+            continue
+          fi
+          ;;
+        var_disk)
+          if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then
+            msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring"
+            continue
+          fi
+          ;;
+        var_unprivileged)
+          if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
+            msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring"
+            continue
+          fi
+          ;;
+        var_nesting)
+          if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
+            msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring"
+            continue
+          fi
+          # Warn about potential issues with systemd-based OS when nesting is disabled via vars file
+          if [[ "$var_val" == "0" && "${var_os:-debian}" != "alpine" ]]; then
+            msg_warn "Nesting disabled in $file - modern systemd-based distributions may require nesting for proper operation"
+          fi
+          ;;
+        var_keyctl)
+          if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
+            msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring"
+            continue
+          fi
+          ;;
+        var_net)
+          # var_net can be: dhcp, static IP/CIDR, or IP range
+          if [[ "$var_val" != "dhcp" ]]; then
+            if is_ip_range "$var_val"; then
+              : # IP range is valid, will be resolved at runtime
+            elif ! validate_ip_address "$var_val"; then
+              msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring"
            continue
          fi
-          ;;
-        var_vlan)
-          if ! validate_vlan_tag "$var_val"; then
-            msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring"
-            continue
-          fi
-          ;;
-        var_mtu)
-          if ! validate_mtu "$var_val"; then
-            msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring"
-            continue
-          fi
-          ;;
-        var_tags)
-          if ! validate_tags "$var_val"; then
-            msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring"
-            continue
-          fi
-          ;;
-        var_timezone)
-          if ! validate_timezone "$var_val"; then
-            msg_warn "Invalid timezone '$var_val' in $file, ignoring"
-            continue
-          fi
-          ;;
-        var_brg)
-          if ! validate_bridge "$var_val"; then
-            msg_warn "Bridge '$var_val' not found in $file, ignoring"
-            continue
-          fi
-          ;;
-        var_gateway)
-          if ! validate_gateway_ip "$var_val"; then
-            msg_warn "Invalid gateway IP '$var_val' in $file, ignoring"
-            continue
-          fi
-          ;;
-        var_hostname)
-          if ! validate_hostname "$var_val"; then
-            msg_warn "Invalid hostname '$var_val' in $file, ignoring"
-            continue
-          fi
-          ;;
-        var_cpu)
-          if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then
-            msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring"
-            continue
-          fi
-          ;;
-        var_ram)
-          if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then
-            msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring"
-            continue
-          fi
-          ;;
-        var_disk)
-          if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then
-            msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring"
-            continue
-          fi
-          ;;
-        var_unprivileged)
-          if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
-            msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring"
-            continue
-          fi
-          ;;
-        var_nesting)
-          if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
-            msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring"
-            continue
-          fi
-          # Warn about potential issues with systemd-based OS when nesting is disabled via vars file
-          if [[ "$var_val" == "0" && "${var_os:-debian}" != "alpine" ]]; then
-            msg_warn "Nesting disabled in $file - modern systemd-based distributions may require nesting for proper operation"
-          fi
-          ;;
-        var_keyctl)
-          if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
-            msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring"
-            continue
-          fi
-          ;;
-        var_net)
-          # var_net can be: dhcp, static IP/CIDR, or IP range
-          if [[ "$var_val" != "dhcp" ]]; then
-            if is_ip_range "$var_val"; then
-              : # IP range is valid, will be resolved at runtime
-            elif ! validate_ip_address "$var_val"; then
-              msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring"
-              continue
-            fi
-          fi
-          ;;
-        var_fuse|var_tun|var_gpu|var_ssh|var_verbose|var_protection)
-          if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then
-            msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring"
-            continue
-          fi
-          ;;
-        var_ipv6_method)
-          if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then
-            msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring"
-            continue
-          fi
-          ;;
+          fi
+          ;;
+        var_fuse | var_tun | var_gpu | var_ssh | var_verbose | var_protection)
+          if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then
+            msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring"
+            continue
+          fi
+          ;;
+        var_ipv6_method)
+          if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then
+            msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring"
+            continue
+          fi
+          ;;
         esac
       fi

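
Taken together, the checks above define what a valid vars file looks like. An illustrative file that would pass every validation (the path matches the one used by settings_menu() below; the values themselves are invented):

    # /usr/local/community-scripts/default.vars (illustrative values)
    var_cpu=2            # integer, 1-128
    var_ram=2048         # integer, >= 256 MiB
    var_disk=8           # integer, >= 1 GB
    var_unprivileged=1   # 0 or 1
    var_nesting=1        # 0 or 1; 0 triggers a warning on systemd-based OSes
    var_net=dhcp         # dhcp, a static IP/CIDR, or an IP range
    var_ipv6_method=auto # auto/dhcp/static/none
    var_fuse=no          # yes/no booleans: fuse, tun, gpu, ssh, verbose, protection
    var_brg=vmbr0        # must exist as a bridge on the host
    var_vlan=            # empty values are skipped and fall back to defaults
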
@@ -2764,6 +2765,26 @@ Advanced:
   [[ "$APT_CACHER" == "yes" ]] && echo -e "${INFO}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}"
   echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
   echo -e "${CREATING}${BOLD}${RD}Creating an LXC of ${APP} using the above advanced settings${CL}"

+  # Log settings to file
+  log_section "CONTAINER SETTINGS (ADVANCED) - ${APP}"
+  log_msg "Application: ${APP}"
+  log_msg "PVE Version: ${PVEVERSION} (Kernel: ${KERNEL_VERSION})"
+  log_msg "Operating System: $var_os ($var_version)"
+  log_msg "Container Type: $([ "$CT_TYPE" == "1" ] && echo "Unprivileged" || echo "Privileged")"
+  log_msg "Container ID: $CT_ID"
+  log_msg "Hostname: $HN"
+  log_msg "Disk Size: ${DISK_SIZE} GB"
+  log_msg "CPU Cores: $CORE_COUNT"
+  log_msg "RAM Size: ${RAM_SIZE} MiB"
+  log_msg "Bridge: $BRG"
+  log_msg "IPv4: $NET"
+  log_msg "IPv6: $IPV6_METHOD"
+  log_msg "FUSE Support: ${ENABLE_FUSE:-no}"
+  log_msg "Nesting: $([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")"
+  log_msg "GPU Passthrough: ${ENABLE_GPU:-no}"
+  log_msg "Verbose Mode: $VERBOSE"
+  log_msg "Session ID: ${SESSION_ID}"
 }

 # ==============================================================================
@@ -2871,6 +2892,7 @@ diagnostics_menu() {
 # - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.)
 # - Uses icons and formatting for readability
 # - Convert CT_TYPE to description
+# - Also logs settings to log file for debugging
 # ------------------------------------------------------------------------------
 echo_default() {
   CT_TYPE_DESC="Unprivileged"
@@ -2892,6 +2914,20 @@ echo_default() {
   fi
   echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
   echo -e " "

+  # Log settings to file
+  log_section "CONTAINER SETTINGS - ${APP}"
+  log_msg "Application: ${APP}"
+  log_msg "PVE Version: ${PVEVERSION} (Kernel: ${KERNEL_VERSION})"
+  log_msg "Container ID: ${CT_ID}"
+  log_msg "Operating System: $var_os ($var_version)"
+  log_msg "Container Type: $CT_TYPE_DESC"
+  log_msg "Disk Size: ${DISK_SIZE} GB"
+  log_msg "CPU Cores: ${CORE_COUNT}"
+  log_msg "RAM Size: ${RAM_SIZE} MiB"
+  [[ -n "${var_gpu:-}" && "${var_gpu}" == "yes" ]] && log_msg "GPU Passthrough: Enabled"
+  [[ "$VERBOSE" == "yes" ]] && log_msg "Verbose Mode: Enabled"
+  log_msg "Session ID: ${SESSION_ID}"
 }

 # ------------------------------------------------------------------------------
@@ -3078,10 +3114,10 @@ settings_menu() {

     case "$choice" in
     1) diagnostics_menu ;;
-    2) nano /usr/local/community-scripts/default.vars ;;
+    2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
     3)
       if [ -f "$(get_app_defaults_path)" ]; then
-        nano "$(get_app_defaults_path)"
+        ${EDITOR:-nano} "$(get_app_defaults_path)"
       else
         # Back was selected (no app.vars available)
         return
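
Swapping nano for ${EDITOR:-nano} uses Bash's use-a-default parameter expansion: the user's $EDITOR wins when it is set and non-empty, otherwise nano is substituted, and EDITOR itself stays untouched. For example:

    unset EDITOR
    echo "${EDITOR:-nano}" # -> nano
    EDITOR=vim
    echo "${EDITOR:-nano}" # -> vim
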
@@ -3636,6 +3672,9 @@ $PCT_OPTIONS_STRING"
       exit 214
     fi
     msg_ok "Storage space validated"
+
+    # Report installation start to API (early - captures failed installs too)
+    post_to_api
   fi

   create_lxc_container || exit $?
@@ -4010,6 +4049,9 @@ EOF'
   # Install SSH keys
   install_ssh_keys_into_ct

+  # Start timer for duration tracking
+  start_install_timer
+
   # Run application installer
   # Disable error trap - container errors are handled internally via flag file
   set +Eeuo pipefail # Disable ALL error handling temporarily
@@ -4040,25 +4082,59 @@ EOF'
   if [[ $install_exit_code -ne 0 ]]; then
     msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"

-    # Copy both logs from container before potential deletion
+    # Copy install log from container BEFORE API call so get_error_text() can read it
     local build_log_copied=false
     local install_log_copied=false
+    local combined_log="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log"

     if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
-      # Copy BUILD_LOG (creation log) if it exists
+      # Create combined log with header
+      {
+        echo "================================================================================"
+        echo "COMBINED INSTALLATION LOG - ${APP:-LXC}"
+        echo "Container ID: ${CTID}"
+        echo "Session ID: ${SESSION_ID}"
+        echo "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
+        echo "================================================================================"
+        echo ""
+      } >"$combined_log"
+
+      # Append BUILD_LOG (host-side creation log) if it exists
       if [[ -f "${BUILD_LOG}" ]]; then
-        cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
+        {
+          echo "================================================================================"
+          echo "PHASE 1: CONTAINER CREATION (Host)"
+          echo "================================================================================"
+          cat "${BUILD_LOG}"
+          echo ""
+        } >>"$combined_log"
+        build_log_copied=true
       fi

-      # Copy INSTALL_LOG from container
-      if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
+      # Copy and append INSTALL_LOG from container
+      local temp_install_log="/tmp/.install-temp-${SESSION_ID}.log"
+      if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_install_log" 2>/dev/null; then
+        {
+          echo "================================================================================"
+          echo "PHASE 2: APPLICATION INSTALLATION (Container)"
+          echo "================================================================================"
+          cat "$temp_install_log"
+          echo ""
+        } >>"$combined_log"
+        rm -f "$temp_install_log"
         install_log_copied=true
+        # Point INSTALL_LOG to combined log so get_error_text() finds it
+        INSTALL_LOG="$combined_log"
       fi
+    fi

-      # Show available logs
+    # Report failure to telemetry API (now with log available on host)
+    post_update_to_api "failed" "$install_exit_code"
+
+    # Show combined log location
+    if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
       echo ""
-      [[ "$build_log_copied" == true ]] && echo -e "${GN}✔${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
-      [[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
+      echo -e "${GN}✔${CL} Installation log: ${BL}${combined_log}${CL}"
     fi

     # Dev mode: Keep container or open breakpoint shell
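
The combined log above leans on Bash group commands: a single redirection covers a whole block of echo/cat output, and > versus >> distinguishes the header from the appended phases. The bare pattern, with invented paths:

    log="/tmp/demo-combined.log"
    {
      echo "== header =="
      echo "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
    } >"$log"  # first group creates/truncates the file
    {
      echo "== phase 1 =="
      cat /tmp/demo-phase1.log 2>/dev/null
    } >>"$log" # later groups append
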
@@ -4116,6 +4192,10 @@ EOF'
       echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}"
     fi

+    # Force one final status update attempt after cleanup
+    # This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
+    post_update_to_api "failed" "$install_exit_code" "force"
+
     exit $install_exit_code
   fi
 }
@@ -5119,18 +5199,74 @@ EOF
 # SECTION 10: ERROR HANDLING & EXIT TRAPS
 # ==============================================================================

+# ------------------------------------------------------------------------------
+# ensure_log_on_host()
+#
+# - Ensures INSTALL_LOG points to a readable file on the host
+# - If INSTALL_LOG points to a container path (e.g. /root/.install-*),
+#   tries to pull it from the container and create a combined log
+# - This allows get_error_text() to find actual error output for telemetry
+# ------------------------------------------------------------------------------
+ensure_log_on_host() {
+  # Already readable on host? Nothing to do.
+  [[ -n "${INSTALL_LOG:-}" && -s "${INSTALL_LOG}" ]] && return 0
+
+  # Try pulling from container and creating combined log
+  if [[ -n "${CTID:-}" && -n "${SESSION_ID:-}" ]] && command -v pct &>/dev/null; then
+    local combined_log="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log"
+    if [[ ! -s "$combined_log" ]]; then
+      # Create combined log
+      {
+        echo "================================================================================"
+        echo "COMBINED INSTALLATION LOG - ${APP:-LXC}"
+        echo "Container ID: ${CTID}"
+        echo "Session ID: ${SESSION_ID}"
+        echo "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
+        echo "================================================================================"
+        echo ""
+      } >"$combined_log" 2>/dev/null || return 0
+      # Append BUILD_LOG if it exists
+      if [[ -f "${BUILD_LOG:-}" ]]; then
+        {
+          echo "================================================================================"
+          echo "PHASE 1: CONTAINER CREATION (Host)"
+          echo "================================================================================"
+          cat "${BUILD_LOG}"
+          echo ""
+        } >>"$combined_log"
+      fi
+      # Pull INSTALL_LOG from container
+      local temp_log="/tmp/.install-temp-${SESSION_ID}.log"
+      if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_log" 2>/dev/null; then
+        {
+          echo "================================================================================"
+          echo "PHASE 2: APPLICATION INSTALLATION (Container)"
+          echo "================================================================================"
+          cat "$temp_log"
+          echo ""
+        } >>"$combined_log"
+        rm -f "$temp_log"
+      fi
+    fi
+    if [[ -s "$combined_log" ]]; then
+      INSTALL_LOG="$combined_log"
+    fi
+  fi
+}
+
 # ------------------------------------------------------------------------------
 # api_exit_script()
 #
 # - Exit trap handler for reporting to API telemetry
-# - Captures exit code and reports to API using centralized error descriptions
-# - Uses explain_exit_code() from error_handler.func for consistent error messages
-# - Posts failure status with exit code to API (error description added automatically)
+# - Captures exit code and reports to PocketBase using centralized error descriptions
+# - Uses explain_exit_code() from api.func for consistent error messages
+# - Posts failure status with exit code to API (error description resolved automatically)
 # - Only executes on non-zero exit codes
 # ------------------------------------------------------------------------------
 api_exit_script() {
   exit_code=$?
   if [ $exit_code -ne 0 ]; then
+    ensure_log_on_host
     post_update_to_api "failed" "$exit_code"
   fi
 }
@@ -5138,6 +5274,6 @@ api_exit_script() {
 if command -v pveversion >/dev/null 2>&1; then
   trap 'api_exit_script' EXIT
 fi
-trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
-trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
-trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
+trap 'ensure_log_on_host; post_update_to_api "failed" "$?"' ERR
+trap 'ensure_log_on_host; post_update_to_api "failed" "130"' SIGINT
+trap 'ensure_log_on_host; post_update_to_api "failed" "143"' SIGTERM

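Note the shift from words ("INTERRUPTED", "TERMINATED") to the conventional numeric codes 130 (128+SIGINT) and 143 (128+SIGTERM), so the API can resolve every failure through the same exit-code table. A reduced sketch of the wiring, with the reporting functions stubbed out for illustration:

    post_update_to_api() { echo "report: status=$1 code=$2"; } # stub
    ensure_log_on_host() { :; }                                # stub
    trap 'ensure_log_on_host; post_update_to_api "failed" "130"' SIGINT
    trap 'ensure_log_on_host; post_update_to_api "failed" "143"' SIGTERM
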
@@ -413,6 +413,69 @@ get_active_logfile() {
 # Legacy compatibility: SILENT_LOGFILE points to active log
 SILENT_LOGFILE="$(get_active_logfile)"

+# ------------------------------------------------------------------------------
+# strip_ansi()
+#
+# - Removes ANSI escape sequences from input text
+# - Used to clean colored output for log files
+# - Handles both piped input and arguments
+# ------------------------------------------------------------------------------
+strip_ansi() {
+  if [[ $# -gt 0 ]]; then
+    echo -e "$*" | sed 's/\x1b\[[0-9;]*m//g; s/\x1b\[[0-9;]*[a-zA-Z]//g'
+  else
+    sed 's/\x1b\[[0-9;]*m//g; s/\x1b\[[0-9;]*[a-zA-Z]//g'
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# log_msg()
+#
+# - Writes message to active log file without ANSI codes
+# - Adds timestamp prefix for log correlation
+# - Creates log file if it doesn't exist
+# - Arguments: message text (can include ANSI codes, will be stripped)
+# ------------------------------------------------------------------------------
+log_msg() {
+  local msg="$*"
+  local logfile
+  logfile="$(get_active_logfile)"
+
+  [[ -z "$msg" ]] && return
+  [[ -z "$logfile" ]] && return
+
+  # Ensure log directory exists
+  mkdir -p "$(dirname "$logfile")" 2>/dev/null || true
+
+  # Strip ANSI codes and write with timestamp
+  local clean_msg
+  clean_msg=$(strip_ansi "$msg")
+  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $clean_msg" >>"$logfile"
+}
+
+# ------------------------------------------------------------------------------
+# log_section()
+#
+# - Writes a section header to the log file
+# - Used for separating different phases of installation
+# - Arguments: section name
+# ------------------------------------------------------------------------------
+log_section() {
+  local section="$1"
+  local logfile
+  logfile="$(get_active_logfile)"
+
+  [[ -z "$logfile" ]] && return
+  mkdir -p "$(dirname "$logfile")" 2>/dev/null || true
+
+  {
+    echo ""
+    echo "================================================================================"
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $section"
+    echo "================================================================================"
+  } >>"$logfile"
+}
+
 # ------------------------------------------------------------------------------
 # silent()
 #
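
In use, the helpers compose: colored console strings pass through strip_ansi() before landing in the file, so the log stays grep-able. For example:

    GN=$'\e[32m' CL=$'\e[0m'
    strip_ansi "${GN}done${CL}"        # prints: done
    echo "${GN}done${CL}" | strip_ansi # piped form, same result
    log_msg "[OK] ${GN}done${CL}"      # appends e.g. [2026-02-13 16:53:27] [OK] done
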
@@ -555,6 +618,9 @@ msg_info() {
   [[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return
   MSG_INFO_SHOWN["$msg"]=1

+  # Log to file
+  log_msg "[INFO] $msg"
+
   stop_spinner
   SPINNER_MSG="$msg"

@@ -598,6 +664,7 @@ msg_ok() {
   stop_spinner
   clear_line
   echo -e "$CM ${GN}${msg}${CL}"
+  log_msg "[OK] $msg"
   unset MSG_INFO_SHOWN["$msg"]
 }

@@ -613,6 +680,7 @@ msg_error() {
   stop_spinner
   local msg="$1"
   echo -e "${BFR:-}${CROSS:-✖️} ${RD}${msg}${CL}" >&2
+  log_msg "[ERROR] $msg"
 }

 # ------------------------------------------------------------------------------
@@ -627,6 +695,7 @@ msg_warn() {
   stop_spinner
   local msg="$1"
   echo -e "${BFR:-}${INFO:-ℹ️} ${YWB}${msg}${CL}" >&2
+  log_msg "[WARN] $msg"
 }

 # ------------------------------------------------------------------------------
@@ -644,6 +713,7 @@ msg_custom() {
   [[ -z "$msg" ]] && return
   stop_spinner
   echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}"
+  log_msg "$msg"
 }

 # ------------------------------------------------------------------------------

@@ -27,100 +27,90 @@
 # ------------------------------------------------------------------------------
 # explain_exit_code()
 #
-# - Maps numeric exit codes to human-readable error descriptions
-# - Supports:
-#   * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
-#   * Package manager errors (APT, DPKG: 100, 101, 255)
-#   * Node.js/npm errors (243-249, 254)
-#   * Python/pip/uv errors (210-212)
-#   * PostgreSQL errors (231-234)
-#   * MySQL/MariaDB errors (241-244)
-#   * MongoDB errors (251-254)
-#   * Proxmox custom codes (200-231)
-# - Returns description string for given exit code
+# - Canonical version is defined in api.func (sourced before this file)
+# - This section only provides a fallback if api.func was not loaded
+# - See api.func SECTION 1 for the authoritative exit code mappings
 # ------------------------------------------------------------------------------
-explain_exit_code() {
-  local code="$1"
-  case "$code" in
-  # --- Generic / Shell ---
+if ! declare -f explain_exit_code &>/dev/null; then
+  explain_exit_code() {
+    local code="$1"
+    case "$code" in
     1) echo "General error / Operation not permitted" ;;
     2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
-  126) echo "Command invoked cannot execute (permission problem?)" ;;
-  127) echo "Command not found" ;;
-  128) echo "Invalid argument to exit" ;;
-  130) echo "Terminated by Ctrl+C (SIGINT)" ;;
-  137) echo "Killed (SIGKILL / Out of memory?)" ;;
-  139) echo "Segmentation fault (core dumped)" ;;
-  143) echo "Terminated (SIGTERM)" ;;
-
-  # --- Package manager / APT / DPKG ---
-  100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
-  101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
-  255) echo "DPKG: Fatal internal error" ;;
-
-  # --- Node.js / npm / pnpm / yarn ---
-  243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
-  245) echo "Node.js: Invalid command-line option" ;;
-  246) echo "Node.js: Internal JavaScript Parse Error" ;;
-  247) echo "Node.js: Fatal internal error" ;;
-  248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
-  249) echo "Node.js: Inspector error" ;;
-  254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
-
-  # --- Python / pip / uv ---
-  210) echo "Python: Virtualenv / uv environment missing or broken" ;;
-  211) echo "Python: Dependency resolution failed" ;;
-  212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
-
-  # --- PostgreSQL ---
-  231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
-  232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
-  233) echo "PostgreSQL: Database does not exist" ;;
-  234) echo "PostgreSQL: Fatal error in query / syntax" ;;
-
-  # --- MySQL / MariaDB ---
-  241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
-  242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
-  243) echo "MySQL/MariaDB: Database does not exist" ;;
-  244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
-
-  # --- MongoDB ---
-  251) echo "MongoDB: Connection failed (server not running)" ;;
-  252) echo "MongoDB: Authentication failed (bad user/password)" ;;
-  253) echo "MongoDB: Database not found" ;;
-  254) echo "MongoDB: Fatal query error" ;;
-
-  # --- Proxmox Custom Codes ---
-  200) echo "Proxmox: Failed to create lock file" ;;
-  203) echo "Proxmox: Missing CTID variable" ;;
-  204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
-  205) echo "Proxmox: Invalid CTID (<100)" ;;
-  206) echo "Proxmox: CTID already in use" ;;
-  207) echo "Proxmox: Password contains unescaped special characters" ;;
-  208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
-  209) echo "Proxmox: Container creation failed" ;;
-  210) echo "Proxmox: Cluster not quorate" ;;
-  211) echo "Proxmox: Timeout waiting for template lock" ;;
-  212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
-  213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
-  214) echo "Proxmox: Not enough storage space" ;;
-  215) echo "Proxmox: Container created but not listed (ghost state)" ;;
-  216) echo "Proxmox: RootFS entry missing in config" ;;
-  217) echo "Proxmox: Storage not accessible" ;;
-  219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
-  224) echo "Proxmox: PBS storage is for backups only" ;;
-  218) echo "Proxmox: Template file corrupted or incomplete" ;;
-  220) echo "Proxmox: Unable to resolve template path" ;;
-  221) echo "Proxmox: Template file not readable" ;;
-  222) echo "Proxmox: Template download failed" ;;
-  223) echo "Proxmox: Template not available after download" ;;
-  225) echo "Proxmox: No template available for OS/Version" ;;
-  231) echo "Proxmox: LXC stack upgrade failed" ;;
-
-  # --- Default ---
-  *) echo "Unknown error" ;;
-  esac
-}
+    6) echo "curl: DNS resolution failed (could not resolve host)" ;;
+    7) echo "curl: Failed to connect (network unreachable / host down)" ;;
+    22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
+    28) echo "curl: Operation timeout (network slow or server not responding)" ;;
+    35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
+    100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
+    101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
+    102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
+    124) echo "Command timed out (timeout command)" ;;
+    126) echo "Command invoked cannot execute (permission problem?)" ;;
+    127) echo "Command not found" ;;
+    128) echo "Invalid argument to exit" ;;
+    130) echo "Terminated by Ctrl+C (SIGINT)" ;;
+    134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
+    137) echo "Killed (SIGKILL / Out of memory?)" ;;
+    139) echo "Segmentation fault (core dumped)" ;;
+    141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;;
+    143) echo "Terminated (SIGTERM)" ;;
+    150) echo "Systemd: Service failed to start" ;;
+    151) echo "Systemd: Service unit not found" ;;
+    152) echo "Permission denied (EACCES)" ;;
+    153) echo "Build/compile failed (make/gcc/cmake)" ;;
+    154) echo "Node.js: Native addon build failed (node-gyp)" ;;
+    160) echo "Python: Virtualenv / uv environment missing or broken" ;;
+    161) echo "Python: Dependency resolution failed" ;;
+    162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
+    170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
+    171) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
+    172) echo "PostgreSQL: Database does not exist" ;;
+    173) echo "PostgreSQL: Fatal error in query / syntax" ;;
+    180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
+    181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
+    182) echo "MySQL/MariaDB: Database does not exist" ;;
+    183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
+    190) echo "MongoDB: Connection failed (server not running)" ;;
+    191) echo "MongoDB: Authentication failed (bad user/password)" ;;
+    192) echo "MongoDB: Database not found" ;;
+    193) echo "MongoDB: Fatal query error" ;;
+    200) echo "Proxmox: Failed to create lock file" ;;
+    203) echo "Proxmox: Missing CTID variable" ;;
+    204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
+    205) echo "Proxmox: Invalid CTID (<100)" ;;
+    206) echo "Proxmox: CTID already in use" ;;
+    207) echo "Proxmox: Password contains unescaped special characters" ;;
+    208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
+    209) echo "Proxmox: Container creation failed" ;;
+    210) echo "Proxmox: Cluster not quorate" ;;
+    211) echo "Proxmox: Timeout waiting for template lock" ;;
+    212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
+    213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
+    214) echo "Proxmox: Not enough storage space" ;;
+    215) echo "Proxmox: Container created but not listed (ghost state)" ;;
+    216) echo "Proxmox: RootFS entry missing in config" ;;
+    217) echo "Proxmox: Storage not accessible" ;;
+    218) echo "Proxmox: Template file corrupted or incomplete" ;;
+    219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
+    220) echo "Proxmox: Unable to resolve template path" ;;
+    221) echo "Proxmox: Template file not readable" ;;
+    222) echo "Proxmox: Template download failed" ;;
+    223) echo "Proxmox: Template not available after download" ;;
+    224) echo "Proxmox: PBS storage is for backups only" ;;
+    225) echo "Proxmox: No template available for OS/Version" ;;
+    231) echo "Proxmox: LXC stack upgrade failed" ;;
+    243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
+    245) echo "Node.js: Invalid command-line option" ;;
+    246) echo "Node.js: Internal JavaScript Parse Error" ;;
+    247) echo "Node.js: Fatal internal error" ;;
+    248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
+    249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
+    255) echo "DPKG: Fatal internal error" ;;
+    *) echo "Unknown error" ;;
+    esac
+  }
+fi

 # ==============================================================================
 # SECTION 2: ERROR HANDLERS
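
Whichever definition wins, callers use it the same way; a few lookups from the table above:

    explain_exit_code 137 # -> Killed (SIGKILL / Out of memory?)
    explain_exit_code 214 # -> Proxmox: Not enough storage space
    explain_exit_code 999 # -> Unknown error
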
@@ -197,12 +187,7 @@ error_handler() {

     # Create error flag file with exit code for host detection
     echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
-    if declare -f msg_custom >/dev/null 2>&1; then
-      msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
-    else
-      echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
-    fi
+    # Log path is shown by host as combined log - no need to show container path
   else
     # HOST CONTEXT: Show local log path and offer container cleanup
     if declare -f msg_custom >/dev/null 2>&1; then
@@ -213,6 +198,11 @@ error_handler() {

     # Offer to remove container if it exists (build errors after container creation)
     if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then
+      # Report failure to API before container cleanup
+      if declare -f post_update_to_api &>/dev/null; then
+        post_update_to_api "failed" "$exit_code"
+      fi
+
       echo ""
       echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"

@@ -232,6 +222,12 @@ error_handler() {
         pct destroy "$CTID" &>/dev/null || true
         echo -e "${GN}✔${CL} Container ${CTID} removed"
       fi
+
+      # Force one final status update attempt after cleanup
+      # This ensures status is updated even if the first attempt failed (e.g., HTTP 400)
+      if declare -f post_update_to_api &>/dev/null; then
+        post_update_to_api "failed" "$exit_code" "force"
+      fi
     fi
   fi
 fi
@@ -253,6 +249,18 @@ error_handler() {
 # ------------------------------------------------------------------------------
 on_exit() {
   local exit_code=$?
+  # Report orphaned "installing" records to telemetry API
+  # Catches ALL exit paths: errors (non-zero), signals, AND clean exits where
+  # post_to_api was called ("installing" sent) but post_update_to_api was never called
+  if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then
+    if declare -f post_update_to_api >/dev/null 2>&1; then
+      if [[ $exit_code -ne 0 ]]; then
+        post_update_to_api "failed" "$exit_code"
+      else
+        post_update_to_api "failed" "1"
+      fi
+    fi
+  fi
   [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
   exit "$exit_code"
 }
@@ -265,6 +273,10 @@ on_exit() {
 # - Exits with code 130 (128 + SIGINT=2)
 # ------------------------------------------------------------------------------
 on_interrupt() {
+  # Report interruption to telemetry API (prevents stuck "installing" records)
+  if declare -f post_update_to_api >/dev/null 2>&1; then
+    post_update_to_api "failed" "130"
+  fi
   if declare -f msg_error >/dev/null 2>&1; then
     msg_error "Interrupted by user (SIGINT)"
   else
@@ -282,6 +294,10 @@ on_interrupt() {
 # - Triggered by external process termination
 # ------------------------------------------------------------------------------
 on_terminate() {
+  # Report termination to telemetry API (prevents stuck "installing" records)
+  if declare -f post_update_to_api >/dev/null 2>&1; then
+    post_update_to_api "failed" "143"
+  fi
   if declare -f msg_error >/dev/null 2>&1; then
     msg_error "Terminated by signal (SIGTERM)"
   else

@@ -465,6 +465,7 @@ manage_tool_repository() {
       msg_error "Failed to download MongoDB GPG key"
       return 1
     fi
+    chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg"

     # Setup repository
     local distro_codename
@@ -1294,12 +1295,33 @@ setup_deb822_repo() {
     return 1
   }

-  # Import GPG
-  curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
-    msg_error "Failed to import GPG key for ${name}"
+  # Import GPG key (auto-detect binary vs ASCII-armored format)
+  local tmp_gpg
+  tmp_gpg=$(mktemp) || return 1
+  curl -fsSL "$gpg_url" -o "$tmp_gpg" || {
+    msg_error "Failed to download GPG key for ${name}"
+    rm -f "$tmp_gpg"
     return 1
   }

+  if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then
+    # ASCII-armored — dearmor to binary
+    gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" < "$tmp_gpg" || {
+      msg_error "Failed to dearmor GPG key for ${name}"
+      rm -f "$tmp_gpg"
+      return 1
+    }
+  else
+    # Already in binary GPG format — copy directly
+    cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || {
+      msg_error "Failed to install GPG key for ${name}"
+      rm -f "$tmp_gpg"
+      return 1
+    }
+  fi
+  rm -f "$tmp_gpg"
+  chmod 644 "/etc/apt/keyrings/${name}.gpg"
+
   # Write deb822
   {
     echo "Types: deb"

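For context, the stanza this function goes on to write is the deb822 sources format. A hypothetical result for a repository named "example" could look like this (URIs, suite, and component are invented; the keyring path follows the pattern above):

    # /etc/apt/sources.list.d/example.sources (illustrative)
    Types: deb
    URIs: https://repo.example.com/debian
    Suites: bookworm
    Components: main
    Signed-By: /etc/apt/keyrings/example.gpg
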
@@ -75,14 +75,37 @@ pct exec "$CTID" -- bash -c '
 set -e
 export DEBIAN_FRONTEND=noninteractive

-ID=$(grep "^ID=" /etc/os-release | cut -d"=" -f2)
-VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2)
+# Source os-release properly (handles quoted values)
+source /etc/os-release

-# fallback if DNS is poisoned or blocked
+# Fallback if DNS is poisoned or blocked
 ORIG_RESOLV="/etc/resolv.conf"
 BACKUP_RESOLV="/tmp/resolv.conf.backup"

-if ! dig +short pkgs.tailscale.com | grep -qvE "^127\.|^0\.0\.0\.0$"; then
+# Check DNS resolution using multiple methods (dig may not be installed)
+dns_check_failed=true
+if command -v dig &>/dev/null; then
+  if dig +short pkgs.tailscale.com 2>/dev/null | grep -qvE "^127\.|^0\.0\.0\.0$|^$"; then
+    dns_check_failed=false
+  fi
+elif command -v host &>/dev/null; then
+  if host pkgs.tailscale.com 2>/dev/null | grep -q "has address"; then
+    dns_check_failed=false
+  fi
+elif command -v nslookup &>/dev/null; then
+  if nslookup pkgs.tailscale.com 2>/dev/null | grep -q "Address:"; then
+    dns_check_failed=false
+  fi
+elif command -v getent &>/dev/null; then
+  if getent hosts pkgs.tailscale.com &>/dev/null; then
+    dns_check_failed=false
+  fi
+else
+  # No DNS tools available, try curl directly and assume DNS works
+  dns_check_failed=false
+fi
+
+if $dns_check_failed; then
   echo "[INFO] DNS resolution for pkgs.tailscale.com failed (blocked or redirected)."
   echo "[INFO] Temporarily overriding /etc/resolv.conf with Cloudflare DNS (1.1.1.1)"
   cp "$ORIG_RESOLV" "$BACKUP_RESOLV"
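
Sourcing /etc/os-release is the robust way to read those fields: values may be quoted, and the old grep|cut pipeline would keep the quotes, yielding a broken repository URL later. For instance:

    # /etc/os-release may contain:  ID="ubuntu"  VERSION_CODENAME="noble"
    VER=$(grep "^VERSION_CODENAME=" /etc/os-release | cut -d"=" -f2) # -> "noble" (quotes kept)
    source /etc/os-release
    echo "$VERSION_CODENAME"                                         # -> noble (shell strips quotes)
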
@@ -92,17 +115,22 @@ fi
 if ! command -v curl &>/dev/null; then
   echo "[INFO] curl not found, installing..."
   apt-get update -qq
-  apt-get install -y curl >/dev/null
+  apt update -qq
+  apt install -y curl >/dev/null
 fi

-curl -fsSL https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg \
+# Ensure keyrings directory exists
+mkdir -p /usr/share/keyrings
+
+curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VERSION_CODENAME}.noarmor.gpg" \
   | tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null

-echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
+echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VERSION_CODENAME} main" \
   >/etc/apt/sources.list.d/tailscale.list

 apt-get update -qq
-apt-get install -y tailscale >/dev/null
+apt update -qq
+apt install -y tailscale >/dev/null

 if [[ -f /tmp/resolv.conf.backup ]]; then
   echo "[INFO] Restoring original /etc/resolv.conf"

@@ -131,7 +131,7 @@ function detect_service() {

 function backup_container() {
   msg_info "Creating backup for container $1"
-  vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "community-scripts backup updater" >/dev/null 2>&1
+  vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "{{guestname}} - community-scripts backup updater" >/dev/null 2>&1
   status=$?

   if [ $status -eq 0 ]; then
@@ -151,11 +151,11 @@ function get_backup_storages() {
     split($0, a, ":")
     type = a[1]
     name = a[2]
-    sub(/^ +/, "", name)
+    gsub(/^[ \t]+|[ \t]+$/, "", name)
     has_content = 0
     has_backup = 0
   }
-  /^ +content/ {
+  /^[ \t]*content/ {
     has_content = 1
     if ($0 ~ /backup/) has_backup = 1
   }

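The two regex changes make the awk parser tolerant of tab indentation and trailing whitespace in /etc/pve/storage.cfg, whose entries look roughly like this (illustrative):

    dir: local
            path /var/lib/vz
            content iso,vztmpl,backup

    # sub(/^ +/, ...) trimmed leading spaces only; gsub(/^[ \t]+|[ \t]+$/, ...)
    # also trims tabs and trailing whitespace, and /^[ \t]*content/ matches
    # content lines however they are indented.
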
@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -203,7 +203,6 @@ function exit-script() {

 function default_settings() {
   VMID=$(get_valid_nextid)
-  FORMAT=",efitype=4m"
   MACHINE=""
   DISK_SIZE="4G"
   DISK_CACHE=""
@@ -259,11 +258,9 @@ function advanced_settings() {
     3>&1 1>&2 2>&3); then
     if [ "$MACH" = q35 ]; then
       echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
-      FORMAT=""
       MACHINE=" -machine q35"
     else
       echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
-      FORMAT=",efitype=4m"
       MACHINE=""
     fi
   else
@@ -476,31 +473,45 @@ case $STORAGE_TYPE in
 nfs | dir | cifs)
   DISK_EXT=".qcow2"
   DISK_REF="$VMID/"
-  DISK_IMPORT="-format qcow2"
+  DISK_IMPORT="--format qcow2"
   THIN=""
   ;;
 btrfs)
   DISK_EXT=".raw"
   DISK_REF="$VMID/"
-  DISK_IMPORT="-format raw"
-  FORMAT=",efitype=4m"
+  DISK_IMPORT="--format raw"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="--format raw"
+  ;;
 esac
-for i in {0,1}; do
-  disk="DISK$i"
-  eval DISK"${i}"=vm-"${VMID}"-disk-"${i}"${DISK_EXT:-}
-  eval DISK"${i}"_REF="${STORAGE}":"${DISK_REF:-}"${!disk}
-done

 msg_info "Creating a Arch Linux VM"
 qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
   -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
-pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
-qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
+if qm disk import --help >/dev/null 2>&1; then
+  IMPORT_CMD=(qm disk import)
+else
+  IMPORT_CMD=(qm importdisk)
+fi
+
+IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" ${DISK_IMPORT:-} 2>&1 || true)"
+DISK_REF_IMPORTED="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
+[[ -z "$DISK_REF_IMPORTED" ]] && DISK_REF_IMPORTED="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$5 ~ ("vm-"id"-disk-") {print $1":"$5}' | sort | tail -n1)"
+[[ -z "$DISK_REF_IMPORTED" ]] && {
+  msg_error "Unable to determine imported disk reference."
+  echo "$IMPORT_OUT"
+  exit 1
+}
+msg_ok "Imported disk (${CL}${BL}${DISK_REF_IMPORTED}${CL})"
+
 qm set $VMID \
-  -efidisk0 ${DISK0_REF}${FORMAT} \
-  -scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
+  -efidisk0 ${STORAGE}:0,efitype=4m \
+  -scsi0 ${DISK_REF_IMPORTED},${DISK_CACHE}${THIN%,} \
   -ide2 ${STORAGE}:cloudinit \
   -boot order=scsi0 \
   -serial0 socket >/dev/null

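The probe above prefers the newer `qm disk import` spelling and falls back to the legacy `qm importdisk`; capturing combined output lets the script scrape the storage reference from the "successfully imported disk" message. A reduced sketch with invented VM ID, image path, and storage name:

    if qm disk import --help >/dev/null 2>&1; then
      IMPORT_CMD=(qm disk import)
    else
      IMPORT_CMD=(qm importdisk)
    fi
    OUT="$("${IMPORT_CMD[@]}" 105 /tmp/image.qcow2 local-lvm --format qcow2 2>&1 || true)"
    REF="$(sed -n "s/.*successfully imported disk '\([^']*\)'.*/\1/p" <<<"$OUT")"
    echo "imported as: ${REF:-unknown}"
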
@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -201,6 +201,17 @@ function exit-script() {
   exit
 }

+function select_cloud_init() {
+  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \
+    --yesno "Enable Cloud-Init for VM configuration?\n\nCloud-Init allows automatic configuration of:\n- User accounts and passwords\n- SSH keys\n- Network settings (DHCP/Static)\n- DNS configuration\n\nYou can also configure these settings later in Proxmox UI.\n\nNote: Without Cloud-Init, the nocloud image will be used with console auto-login." --defaultno 18 68); then
+    CLOUD_INIT="yes"
+    echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}yes${CL}"
+  else
+    CLOUD_INIT="no"
+    echo -e "${CLOUD}${BOLD}${DGN}Cloud-Init: ${BGN}no${CL}"
+  fi
+}
+
 function default_settings() {
   VMID=$(get_valid_nextid)
   FORMAT=",efitype=4m"
@@ -216,7 +227,6 @@ function default_settings() {
|
|||||||
VLAN=""
|
VLAN=""
|
||||||
MTU=""
|
MTU=""
|
||||||
START_VM="yes"
|
START_VM="yes"
|
||||||
CLOUD_INIT="no"
|
|
||||||
METHOD="default"
|
METHOD="default"
|
||||||
echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
|
echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
|
||||||
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
|
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
|
||||||
@@ -230,7 +240,7 @@ function default_settings() {
|
|||||||
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
|
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
|
||||||
echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
|
echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
|
||||||
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
|
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
|
||||||
echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
|
select_cloud_init
|
||||||
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
|
echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
|
||||||
echo -e "${CREATING}${BOLD}${DGN}Creating a Debian 13 VM using the above default settings${CL}"
|
echo -e "${CREATING}${BOLD}${DGN}Creating a Debian 13 VM using the above default settings${CL}"
|
||||||
}
|
}
|
||||||
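select_cloud_init centralizes the prompt that default_settings and advanced_settings previously duplicated. whiptail's --yesno dialog exits 0 for "Yes" and 1 for "No" (255 on Esc), so it can drive an if statement directly; a minimal sketch, assuming whiptail is installed:

#!/usr/bin/env bash
# Sketch of the whiptail yes/no pattern used above.
# --defaultno preselects the "No" button; 10 and 58 are height and width.
if whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \
  --yesno "Enable Cloud-Init for VM configuration?" --defaultno 10 58; then
  CLOUD_INIT="yes"
else
  CLOUD_INIT="no"
fi
echo "Cloud-Init: $CLOUD_INIT"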
@@ -400,13 +410,7 @@ function advanced_settings() {
     exit-script
   fi

-  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" --yesno "Configure the VM with Cloud-init?" --defaultno 10 58); then
-    echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}yes${CL}"
-    CLOUD_INIT="yes"
-  else
-    echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}no${CL}"
-    CLOUD_INIT="no"
-  fi
+  select_cloud_init

   if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then
     echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
@@ -473,6 +477,17 @@ else
 fi
 msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
 msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
+
+# ==============================================================================
+# PREREQUISITES
+# ==============================================================================
+if ! command -v virt-customize &>/dev/null; then
+  msg_info "Installing libguestfs-tools"
+  apt-get update >/dev/null 2>&1
+  apt-get install -y libguestfs-tools >/dev/null 2>&1
+  msg_ok "Installed libguestfs-tools"
+fi
+
 msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
 if [ "$CLOUD_INIT" == "yes" ]; then
   URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
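The prerequisite block is idempotent: it installs libguestfs-tools only when the virt-customize binary is missing, so reruns skip the apt work. A minimal sketch of the guard (error handling and output redirection elided):

#!/usr/bin/env bash
# Sketch: install a package only when its binary is absent.
# "command -v" is the portable test for an executable in PATH.
if ! command -v virt-customize &>/dev/null; then
  apt-get update
  apt-get install -y libguestfs-tools
fi
virt-customize --version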
@@ -486,6 +501,50 @@ echo -en "\e[1A\e[0K"
 FILE=$(basename $URL)
 msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
+
+# ==============================================================================
+# IMAGE CUSTOMIZATION
+# ==============================================================================
+msg_info "Customizing ${FILE} image"
+
+WORK_FILE=$(mktemp --suffix=.qcow2)
+cp "$FILE" "$WORK_FILE"
+
+# Set hostname
+virt-customize -q -a "$WORK_FILE" --hostname "${HN}" >/dev/null 2>&1
+
+# Prepare for unique machine-id on first boot
+virt-customize -q -a "$WORK_FILE" --run-command "truncate -s 0 /etc/machine-id" >/dev/null 2>&1
+virt-customize -q -a "$WORK_FILE" --run-command "rm -f /var/lib/dbus/machine-id" >/dev/null 2>&1
+
+# Disable systemd-firstboot to prevent interactive prompts blocking the console
+virt-customize -q -a "$WORK_FILE" --run-command "systemctl disable systemd-firstboot.service 2>/dev/null; rm -f /etc/systemd/system/sysinit.target.wants/systemd-firstboot.service; ln -sf /dev/null /etc/systemd/system/systemd-firstboot.service" >/dev/null 2>&1 || true
+
+# Pre-seed firstboot settings so it won't prompt even if triggered
+virt-customize -q -a "$WORK_FILE" --run-command "echo 'Etc/UTC' > /etc/timezone && ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime" >/dev/null 2>&1 || true
+virt-customize -q -a "$WORK_FILE" --run-command "touch /etc/locale.conf" >/dev/null 2>&1 || true
+
+if [ "$CLOUD_INIT" == "yes" ]; then
+  # Cloud-Init handles SSH and login
+  virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
+  virt-customize -q -a "$WORK_FILE" --run-command "sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config" >/dev/null 2>&1 || true
+else
+  # Configure auto-login on serial console (ttyS0) and virtual console (tty1)
+  virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d" >/dev/null 2>&1 || true
+  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf << EOF
+[Service]
+ExecStart=
+ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
+EOF' >/dev/null 2>&1 || true
+  virt-customize -q -a "$WORK_FILE" --run-command "mkdir -p /etc/systemd/system/getty@tty1.service.d" >/dev/null 2>&1 || true
+  virt-customize -q -a "$WORK_FILE" --run-command 'cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
+[Service]
+ExecStart=
+ExecStart=-/sbin/agetty --autologin root --noclear %I \$TERM
+EOF' >/dev/null 2>&1 || true
+fi
+
+msg_ok "Customized image"
+
 STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
 case $STORAGE_TYPE in
 nfs | dir)
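The customization hunk works on a scratch copy of the downloaded qcow2 so the original stays pristine, and it empties /etc/machine-id so every VM booted from the image gets a fresh identity (clones otherwise share DHCP and journal identifiers). A minimal sketch of that offline-edit pattern, assuming libguestfs-tools is installed and a local base.qcow2 exists (both hypothetical here):

#!/usr/bin/env bash
# Sketch: customize a scratch copy of a disk image with virt-customize.
WORK_FILE=$(mktemp --suffix=.qcow2)
cp base.qcow2 "$WORK_FILE"   # hypothetical source image

# Set the guest hostname inside the image.
virt-customize -q -a "$WORK_FILE" --hostname demo-vm

# Empty /etc/machine-id so systemd regenerates a unique ID on first boot.
virt-customize -q -a "$WORK_FILE" \
  --run-command "truncate -s 0 /etc/machine-id" \
  --run-command "rm -f /var/lib/dbus/machine-id"

echo "customized: $WORK_FILE"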
@@ -501,6 +560,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
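The new "*)" arm gives any storage backend not matched explicitly a safe raw-format default instead of leaving DISK_EXT, DISK_REF and DISK_IMPORT unset. A sketch of the backend-to-settings mapping (STORAGE is a hypothetical ID; the real scripts cover more backends):

#!/usr/bin/env bash
# Sketch: derive per-backend disk settings from the storage type.
STORAGE=local-lvm
STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
case $STORAGE_TYPE in
nfs | dir)
  DISK_EXT=".qcow2"
  DISK_IMPORT="-format qcow2"
  ;;
btrfs)
  DISK_EXT=".raw"
  DISK_IMPORT="-format raw"
  ;;
*)
  # Fallback: unknown backends get raw imports rather than unset variables.
  DISK_EXT=""
  DISK_IMPORT="-format raw"
  ;;
esac
echo "type=$STORAGE_TYPE ext=${DISK_EXT:-none} import=$DISK_IMPORT"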
@@ -512,7 +576,7 @@ msg_info "Creating a Debian 13 VM"
 qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
   -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
 pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
-qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
+qm importdisk $VMID ${WORK_FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
 if [ "$CLOUD_INIT" == "yes" ]; then
   qm set $VMID \
     -efidisk0 ${DISK0_REF}${FORMAT} \
@@ -527,6 +591,10 @@ else
     -boot order=scsi0 \
     -serial0 socket >/dev/null
 fi
+
+# Clean up work file
+rm -f "$WORK_FILE"
+
 DESCRIPTION=$(
   cat <<EOF
 <div align='center'>
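The customized work file, not the pristine download, is what gets imported, and it is removed with an explicit rm -f once the VM is configured. An alternative pattern (not what the diff does) is an EXIT trap, which also cleans up when the script aborts partway through; a minimal sketch, with base.qcow2 as a hypothetical source:

#!/usr/bin/env bash
# Sketch: trap-based cleanup of a scratch image.
set -Eeuo pipefail
WORK_FILE=$(mktemp --suffix=.qcow2)
# The EXIT trap runs on normal exit and on set -e failures alike.
trap 'rm -f "$WORK_FILE"' EXIT

cp base.qcow2 "$WORK_FILE"
# ... customize and import "$WORK_FILE" here ...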
@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -501,6 +501,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
@@ -45,7 +45,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -74,7 +74,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -71,7 +71,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -566,6 +566,11 @@ zfspool)
   DISK_REF=""
   DISK_IMPORT="-format raw"
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac

 DISK_VAR="vm-${VMID}-disk-0${DISK_EXT:-}"
@@ -70,7 +70,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -487,6 +487,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1,2}; do
   disk="DISK$i"
@@ -74,7 +74,7 @@ function error_handler() {
   local exit_code="$?"
   local line_number="$1"
   local command="$2"
-  post_update_to_api "failed" "$command"
+  post_update_to_api "failed" "$exit_code"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
   echo -e "\n$error_message\n"
   cleanup_vmid
@@ -48,7 +48,7 @@ function error_handler() {
   local exit_code="$?"
   local line_number="$1"
   local command="$2"
-  post_update_to_api "failed" "$command"
+  post_update_to_api "failed" "$exit_code"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
   echo -e "\n$error_message\n"
   cleanup_vmid
@@ -619,6 +619,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
@@ -71,7 +71,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -500,6 +500,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1,2}; do
   disk="DISK$i"
@@ -79,7 +79,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }
@@ -402,6 +402,11 @@ nfs | dir)
   DISK_REF="$VMID/"
   DISK_IMPORT="-format qcow2"
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
@@ -66,7 +66,7 @@ function error_handler() {
   local exit_code="$?"
   local line_number="$1"
   local command="$2"
-  post_update_to_api "failed" "$command"
+  post_update_to_api "failed" "$exit_code"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
   echo -e "\n$error_message\n"
   cleanup_vmid
@@ -482,6 +482,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
@@ -69,7 +69,7 @@ function error_handler() {
   local exit_code="$?"
   local line_number="$1"
   local command="$2"
-  post_update_to_api "failed" "$command"
+  post_update_to_api "failed" "$exit_code"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
   echo -e "\n$error_message\n"
   cleanup_vmid
@@ -484,6 +484,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
@@ -68,7 +68,7 @@ function error_handler() {
   local exit_code="$?"
   local line_number="$1"
   local command="$2"
-  post_update_to_api "failed" "$command"
+  post_update_to_api "failed" "$exit_code"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
   echo -e "\n$error_message\n"
   cleanup_vmid
@@ -483,6 +483,11 @@ btrfs)
   FORMAT=",efitype=4m"
   THIN=""
   ;;
+*)
+  DISK_EXT=""
+  DISK_REF=""
+  DISK_IMPORT="-format raw"
+  ;;
 esac
 for i in {0,1}; do
   disk="DISK$i"
@@ -69,7 +69,7 @@ function error_handler() {
   local line_number="$1"
   local command="$2"
   local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
-  post_update_to_api "failed" "${command}"
+  post_update_to_api "failed" "${exit_code}"
   echo -e "\n$error_message\n"
   cleanup_vmid
 }