Compare commits

..

45 Commits

Author SHA1 Message Date
github-actions[bot]
0230c7f7a1 Update CHANGELOG.md 2026-05-03 20:38:46 +00:00
CanbiZ (MickLesk)
08b1398e7b Refactor: PeaNUT for v6 (#14224) 2026-05-03 22:38:30 +02:00
community-scripts-pr-app[bot]
dca3fb40a8 Update CHANGELOG.md (#14229)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-03 20:36:20 +00:00
Michael Oultram
37eafa199d core: fix validate_bridge function (#14206) 2026-05-03 22:35:53 +02:00
community-scripts-pr-app[bot]
6729fa2a87 Update CHANGELOG.md (#14228)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-03 20:30:48 +00:00
CanbiZ (MickLesk)
cd6bd154d9 fix(pangolin): pin version, drop manual SQL, use upstream migrator (#14223) 2026-05-03 22:30:21 +02:00
community-scripts-pr-app[bot]
92f2079a79 Update CHANGELOG.md (#14227)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-03 20:30:03 +00:00
CanbiZ (MickLesk)
683231127c fix(pbs/pve): guard sed against missing /etc/apt/sources.list (#14222) 2026-05-03 22:29:40 +02:00
community-scripts-pr-app[bot]
7733ac2806 Update CHANGELOG.md (#14226)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-03 20:29:32 +00:00
Tom Frenzel
1d83725249 Hortusfox: fix update issues (#14214) 2026-05-03 22:29:06 +02:00
community-scripts-pr-app[bot]
980fa9fbb0 Update CHANGELOG.md (#14209)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-03 00:16:03 +00:00
community-scripts-pr-app[bot]
01d6df5903 Archive old changelog entries (#14208)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-03 00:15:35 +00:00
community-scripts-pr-app[bot]
3fce87b1d0 Update CHANGELOG.md (#14205)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 22:18:01 +00:00
community-scripts-pr-app[bot]
f4a96e8b4d Update CHANGELOG.md (#14203)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 21:59:33 +00:00
Michel Roegl-Brunner
4d163aa8f8 Revert "tools.func: add GitLab release check/fetch/deploy helpers (#14133)" (#14202)
This reverts commit 9503db319c.
2026-05-02 23:59:09 +02:00
community-scripts-pr-app[bot]
ec059f44ad Update CHANGELOG.md (#14197)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 21:44:59 +00:00
push-app-to-main[bot]
52bed128f0 Add protonmail-bridge (ct) (#14136) 2026-05-02 23:44:36 +02:00
community-scripts-pr-app[bot]
ca409fc06b Update CHANGELOG.md (#14196)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 21:43:50 +00:00
CanbiZ (MickLesk)
9503db319c tools.func: add GitLab release check/fetch/deploy helpers (#14133) 2026-05-02 23:43:26 +02:00
community-scripts-pr-app[bot]
d56fa7ab50 Update CHANGELOG.md (#14191)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 17:00:51 +00:00
community-scripts-pr-app[bot]
d4fd89931f Update CHANGELOG.md (#14190)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 17:00:42 +00:00
community-scripts-pr-app[bot]
5b7d65ce5c Update CHANGELOG.md (#14189)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 17:00:30 +00:00
Slaviša Arežina
7e3d3d2cf4 Nagios: Ping fix (#14186) 2026-05-02 19:00:19 +02:00
push-app-to-main[bot]
2714d9fae4 Tube Archivist (#14123)
Co-authored-by: Slaviša Arežina <58952836+tremor021@users.noreply.github.com>
Co-authored-by: push-app-to-main[bot] <203845782+push-app-to-main[bot]@users.noreply.github.com>
Co-authored-by: CanbiZ (MickLesk) <47820557+MickLesk@users.noreply.github.com>
2026-05-02 19:00:06 +02:00
community-scripts-pr-app[bot]
7af8e907e4 Update CHANGELOG.md (#14183)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 11:41:07 +00:00
CanbiZ (MickLesk)
0190f4e7f1 opnsense-vm: retry pvesm alloc on transient zfs 'got timeout' errors (#14157) 2026-05-02 13:40:49 +02:00
community-scripts-pr-app[bot]
87fa14afaf Update CHANGELOG.md (#14182)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 11:40:44 +00:00
community-scripts-pr-app[bot]
546de16ef6 Update CHANGELOG.md (#14181)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 11:40:34 +00:00
CanbiZ (MickLesk)
553925b8cc ImmichFrame: keep dotnet-sdk installed so update can run dotnet publish (#14158) 2026-05-02 13:40:14 +02:00
community-scripts-pr-app[bot]
7fd0b9f35b Update CHANGELOG.md (#14180)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 11:40:12 +00:00
Chris
6b8a606375 Use UV sync for shelfmark backend build; update to Python 3.14 (#14170) 2026-05-02 13:39:48 +02:00
community-scripts-pr-app[bot]
9033793a66 Update CHANGELOG.md (#14179)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 10:07:08 +00:00
community-scripts-pr-app[bot]
ccc0ff7a2f Update CHANGELOG.md (#14178)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 10:06:59 +00:00
community-scripts-pr-app[bot]
218fd9060e Update CHANGELOG.md (#14177)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 10:06:41 +00:00
CanbiZ (MickLesk)
a48d400da5 alpine: remove deb/ubuntu-only resource & storage checks from update-script (#14166) 2026-05-02 12:06:37 +02:00
community-scripts-pr-app[bot]
208d34d7a6 Update CHANGELOG.md (#14176)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 10:06:29 +00:00
CanbiZ (MickLesk)
9578c6fa91 core: prompt to also run installed addon update scripts (…/bin/update_*) after update_script (#14162) 2026-05-02 12:06:16 +02:00
community-scripts-pr-app[bot]
a7bcd44ae6 Update CHANGELOG.md (#14175)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-02 10:06:02 +00:00
CanbiZ (MickLesk)
289708cc10 Threadfin: use 'threadfin-app' as app name to avoid version-file clash (#14159) 2026-05-02 12:05:37 +02:00
community-scripts-pr-app[bot]
86293fda1b Update CHANGELOG.md (#14168)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-01 21:07:56 +00:00
Joerg Heinemann
cf391086e5 Step ca update (#14058)
* Patch for step-ca.sh

Patch for making $STD happy (/usr/bin/step is a symlink to /usr/bin/step-cli)

* Refactor step-ca installation script

Refactor step-ca installation script to improve configuration and template handling.

- Carve out step-ca-admin.sh
- Patch for making $STD happy (/usr/bin/step is a symlink to /usr/bin/step-cli)
- Define enhanced x509 CA and Certificate Templates
- Configure CA Provisioners, DB and CRL settings
- Generate Root CA Certificate and Key
   - Validity: 219168h (~25 Years)
   - maxPathLen: 1 (Root -> Intermediate -> Leaf) => Only one Intermediate CA allowed below Root CA
   - Active revocation on Intermediate CA and Leaf Certificates by the usage of build-in Certificate Revocation List (CRL)
- Generate Intermediate CA Certificate Bundle and Key
   - Validity: 175368h (~20 Years)
   - maxPathLen: 0 (Root -> Intermediate -> Leaf) => Intermediate CA is only allowed to issue Leaf Certificates
   - Active revocation on Leaf Certificates by the usage of build-in Certificate Revocation List (CRL)
   - Bundle: Certificate Chain (including Root CA Certificate)

* Update source URL in step-ca.sh script
2026-05-01 23:07:30 +02:00
community-scripts-pr-app[bot]
bc72ce83ce Update CHANGELOG.md (#14167)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-01 20:48:14 +00:00
push-app-to-main[bot]
9eee1a7f95 SoulSync (#14124)
* Add soulsync (ct)

* Update pip install command to use requirements.txt

* Update soulsync.sh

---------

Co-authored-by: push-app-to-main[bot] <203845782+push-app-to-main[bot]@users.noreply.github.com>
Co-authored-by: Slaviša Arežina <58952836+tremor021@users.noreply.github.com>
2026-05-01 22:47:49 +02:00
community-scripts-pr-app[bot]
ecd1e29df5 Update CHANGELOG.md (#14165)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2026-05-01 20:44:53 +00:00
push-app-to-main[bot]
b556b5f8c6 Teable (#14125)
* Add teable (ct)

* Apply suggestion from @tremor021

---------

Co-authored-by: push-app-to-main[bot] <203845782+push-app-to-main[bot]@users.noreply.github.com>
Co-authored-by: Slaviša Arežina <58952836+tremor021@users.noreply.github.com>
2026-05-01 22:44:29 +02:00
39 changed files with 1586 additions and 1001 deletions

107
.github/changelogs/2026/04.md generated vendored
View File

@@ -1,3 +1,110 @@
## 2026-04-30
### 🆕 New Scripts
- Nagios ([#14126](https://github.com/community-scripts/ProxmoxVE/pull/14126))
- Neko ([#14121](https://github.com/community-scripts/ProxmoxVE/pull/14121))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- alpine-docker: install openssl as core dependency | alpine-komodo: check & install openssl if missing [@MickLesk](https://github.com/MickLesk) ([#14134](https://github.com/community-scripts/ProxmoxVE/pull/14134))
- endurain: update source references to Codeberg [@MickLesk](https://github.com/MickLesk) ([#14128](https://github.com/community-scripts/ProxmoxVE/pull/14128))
### 💾 Core
- #### 🔧 Refactor
- tools.func: Manage minor versions for MongoDB 8.x [@tremor021](https://github.com/tremor021) ([#14131](https://github.com/community-scripts/ProxmoxVE/pull/14131))
## 2026-04-29
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- GrayLog: MongoDB update to 8.2.x [@tremor021](https://github.com/tremor021) ([#14114](https://github.com/community-scripts/ProxmoxVE/pull/14114))
- Graylog: Better information in the log file [@tremor021](https://github.com/tremor021) ([#14110](https://github.com/community-scripts/ProxmoxVE/pull/14110))
- #### 🔧 Refactor
- Refactor: checkMK [@MickLesk](https://github.com/MickLesk) ([#14105](https://github.com/community-scripts/ProxmoxVE/pull/14105))
- PatchMon: Unpin release [@tremor021](https://github.com/tremor021) ([#14097](https://github.com/community-scripts/ProxmoxVE/pull/14097))
### 💾 Core
- #### 🔧 Refactor
- core: add guidance when storage lacks rootdir support [@MickLesk](https://github.com/MickLesk) ([#14108](https://github.com/community-scripts/ProxmoxVE/pull/14108))
## 2026-04-28
### 🆕 New Scripts
- StoryBook ([#14081](https://github.com/community-scripts/ProxmoxVE/pull/14081))
- CoreDNS ([#14082](https://github.com/community-scripts/ProxmoxVE/pull/14082))
### 🚀 Updated Scripts
- Fix Dawarich Install/Update [@Jerry1098](https://github.com/Jerry1098) ([#14078](https://github.com/community-scripts/ProxmoxVE/pull/14078))
- #### ✨ New Features
- PatchMon Version 2.0.2 Script update [@9technologygroup](https://github.com/9technologygroup) ([#14095](https://github.com/community-scripts/ProxmoxVE/pull/14095))
## 2026-04-27
### 🚀 Updated Scripts
- Add pamUsername column to userOrgs table [@JVKeller](https://github.com/JVKeller) ([#14075](https://github.com/community-scripts/ProxmoxVE/pull/14075))
- #### 🐞 Bug Fixes
- Dawarich: run db:migrate before assets:precompile [@MickLesk](https://github.com/MickLesk) ([#14051](https://github.com/community-scripts/ProxmoxVE/pull/14051))
- TechnitiumDNS: always install .NET 10 if not already present [@MickLesk](https://github.com/MickLesk) ([#14049](https://github.com/community-scripts/ProxmoxVE/pull/14049))
- #### 💥 Breaking Changes
- PatchMon: v2.0.0 migration [@vhsdream](https://github.com/vhsdream) ([#14015](https://github.com/community-scripts/ProxmoxVE/pull/14015))
### 💾 Core
- #### 🔧 Refactor
- Update build.func - fixed spelling mistake [@m1ckywill](https://github.com/m1ckywill) ([#14047](https://github.com/community-scripts/ProxmoxVE/pull/14047))
### 🧰 Tools
- #### 🐞 Bug Fixes
- update-lxcs/apps: avoid pct exec on containers mid-shutdown [@MickLesk](https://github.com/MickLesk) ([#14050](https://github.com/community-scripts/ProxmoxVE/pull/14050))
- #### ✨ New Features
- Add patchmon-agent report execution in update script [@heinemannj](https://github.com/heinemannj) ([#14054](https://github.com/community-scripts/ProxmoxVE/pull/14054))
## 2026-04-26
### 🆕 New Scripts
- TREK ([#14017](https://github.com/community-scripts/ProxmoxVE/pull/14017))
### 🚀 Updated Scripts
- fix(2fauth): handle stale backup directory on update [@omertahaoztop](https://github.com/omertahaoztop) ([#14018](https://github.com/community-scripts/ProxmoxVE/pull/14018))
- #### 🐞 Bug Fixes
- Increase Frigate default CPU cores from 4 to 8 [@MickLesk](https://github.com/MickLesk) ([#14039](https://github.com/community-scripts/ProxmoxVE/pull/14039))
- Technitium DNS: Ensure directories exist before running service [@tremor021](https://github.com/tremor021) ([#14030](https://github.com/community-scripts/ProxmoxVE/pull/14030))
### 💾 Core
- #### 🐞 Bug Fixes
- core: Correct deb822 repository flat path detection [@MickLesk](https://github.com/MickLesk) ([#14037](https://github.com/community-scripts/ProxmoxVE/pull/14037))
## 2026-04-25
### 🚀 Updated Scripts

42
.github/changelogs/2026/05.md generated vendored Normal file
View File

@@ -0,0 +1,42 @@
## 2026-05-02
### 🆕 New Scripts
- protonmail-bridge ([#14136](https://github.com/community-scripts/ProxmoxVE/pull/14136))
- Tube Archivist ([#14123](https://github.com/community-scripts/ProxmoxVE/pull/14123))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Nagios: Ping fix [@tremor021](https://github.com/tremor021) ([#14186](https://github.com/community-scripts/ProxmoxVE/pull/14186))
- opnsense-vm: retry pvesm alloc on transient zfs 'got timeout' errors [@MickLesk](https://github.com/MickLesk) ([#14157](https://github.com/community-scripts/ProxmoxVE/pull/14157))
- ImmichFrame: fix update by reinstalling dotnet-sdk before publish [@MickLesk](https://github.com/MickLesk) ([#14158](https://github.com/community-scripts/ProxmoxVE/pull/14158))
- [FIX]ShelfMark: Use UV sync for shelfmark backend build; update to Python 3.14 [@vhsdream](https://github.com/vhsdream) ([#14170](https://github.com/community-scripts/ProxmoxVE/pull/14170))
- alpine: remove deb/ubuntu-only resource & storage checks from update-script [@MickLesk](https://github.com/MickLesk) ([#14166](https://github.com/community-scripts/ProxmoxVE/pull/14166))
- Threadfin: use 'threadfin-app' as app name to avoid version-file clash [@MickLesk](https://github.com/MickLesk) ([#14159](https://github.com/community-scripts/ProxmoxVE/pull/14159))
### 💾 Core
- #### ✨ New Features
- core: prompt to also run installed addon update scripts (…/bin/update_*) after update_script [@MickLesk](https://github.com/MickLesk) ([#14162](https://github.com/community-scripts/ProxmoxVE/pull/14162))
## 2026-05-01
### 🆕 New Scripts
- SoulSync ([#14124](https://github.com/community-scripts/ProxmoxVE/pull/14124))
- Teable ([#14125](https://github.com/community-scripts/ProxmoxVE/pull/14125))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Step ca update [@heinemannj](https://github.com/heinemannj) ([#14058](https://github.com/community-scripts/ProxmoxVE/pull/14058))
- paperless-ngx: refresh NLTK data on update [@kurtislanderson](https://github.com/kurtislanderson) ([#14144](https://github.com/community-scripts/ProxmoxVE/pull/14144))
- [Pelican Panel] stop deleting the public storage [@LetterN](https://github.com/LetterN) ([#14145](https://github.com/community-scripts/ProxmoxVE/pull/14145))
- #### 🔧 Refactor
- Mail-Archiver: update dependencies [@tremor021](https://github.com/tremor021) ([#14152](https://github.com/community-scripts/ProxmoxVE/pull/14152))

View File

@@ -44,6 +44,9 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
@@ -57,7 +60,14 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
<details>
<summary><h4>April (25 entries)</h4></summary>
<summary><h4>May (2 entries)</h4></summary>
[View May 2026 Changelog](.github/changelogs/2026/05.md)
</details>
<details>
<summary><h4>April (30 entries)</h4></summary>
[View April 2026 Changelog](.github/changelogs/2026/04.md)
@@ -448,12 +458,67 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-05-01
## 2026-05-03
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Hortusfox: fix update issues [@tomfrenzel](https://github.com/tomfrenzel) ([#14214](https://github.com/community-scripts/ProxmoxVE/pull/14214))
- #### ✨ New Features
- Refactor: PeaNUT for v6 [@MickLesk](https://github.com/MickLesk) ([#14224](https://github.com/community-scripts/ProxmoxVE/pull/14224))
- pangolin: pin version, drop manual SQL, use upstream migrator [@MickLesk](https://github.com/MickLesk) ([#14223](https://github.com/community-scripts/ProxmoxVE/pull/14223))
### 💾 Core
- #### 🐞 Bug Fixes
- core: fix validate_bridge function [@MichaelOultram](https://github.com/MichaelOultram) ([#14206](https://github.com/community-scripts/ProxmoxVE/pull/14206))
### 🧰 Tools
- #### 🐞 Bug Fixes
- pve/pbs scripts: guard sed against missing /etc/apt/sources.list [@MickLesk](https://github.com/MickLesk) ([#14222](https://github.com/community-scripts/ProxmoxVE/pull/14222))
## 2026-05-02
### 🆕 New Scripts
- protonmail-bridge ([#14136](https://github.com/community-scripts/ProxmoxVE/pull/14136))
- Tube Archivist ([#14123](https://github.com/community-scripts/ProxmoxVE/pull/14123))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Nagios: Ping fix [@tremor021](https://github.com/tremor021) ([#14186](https://github.com/community-scripts/ProxmoxVE/pull/14186))
- opnsense-vm: retry pvesm alloc on transient zfs 'got timeout' errors [@MickLesk](https://github.com/MickLesk) ([#14157](https://github.com/community-scripts/ProxmoxVE/pull/14157))
- ImmichFrame: fix update by reinstalling dotnet-sdk before publish [@MickLesk](https://github.com/MickLesk) ([#14158](https://github.com/community-scripts/ProxmoxVE/pull/14158))
- [FIX]ShelfMark: Use UV sync for shelfmark backend build; update to Python 3.14 [@vhsdream](https://github.com/vhsdream) ([#14170](https://github.com/community-scripts/ProxmoxVE/pull/14170))
- alpine: remove deb/ubuntu-only resource & storage checks from update-script [@MickLesk](https://github.com/MickLesk) ([#14166](https://github.com/community-scripts/ProxmoxVE/pull/14166))
- Threadfin: use 'threadfin-app' as app name to avoid version-file clash [@MickLesk](https://github.com/MickLesk) ([#14159](https://github.com/community-scripts/ProxmoxVE/pull/14159))
### 💾 Core
- #### ✨ New Features
- core: prompt to also run installed addon update scripts (…/bin/update_*) after update_script [@MickLesk](https://github.com/MickLesk) ([#14162](https://github.com/community-scripts/ProxmoxVE/pull/14162))
## 2026-05-01
### 🆕 New Scripts
- SoulSync ([#14124](https://github.com/community-scripts/ProxmoxVE/pull/14124))
- Teable ([#14125](https://github.com/community-scripts/ProxmoxVE/pull/14125))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Step ca update [@heinemannj](https://github.com/heinemannj) ([#14058](https://github.com/community-scripts/ProxmoxVE/pull/14058))
- paperless-ngx: refresh NLTK data on update [@kurtislanderson](https://github.com/kurtislanderson) ([#14144](https://github.com/community-scripts/ProxmoxVE/pull/14144))
- [Pelican Panel] stop deleting the public storage [@LetterN](https://github.com/LetterN) ([#14145](https://github.com/community-scripts/ProxmoxVE/pull/14145))
@@ -1023,111 +1088,4 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
- #### 🐞 Bug Fixes
- PVE LXC-Updater: pipe apt list through cat to prevent pager hang [@MickLesk](https://github.com/MickLesk) ([#13501](https://github.com/community-scripts/ProxmoxVE/pull/13501))
## 2026-04-02
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Grist: Guard backup restore for empty docs/db files [@MickLesk](https://github.com/MickLesk) ([#13472](https://github.com/community-scripts/ProxmoxVE/pull/13472))
- fix(zigbee2mqtt): suppress grep error when pnpm-workspace.yaml is absent on update [@Copilot](https://github.com/Copilot) ([#13476](https://github.com/community-scripts/ProxmoxVE/pull/13476))
### 🧰 Tools
- #### 🐞 Bug Fixes
- Cron LXC Updater: Add full PATH for cron environment [@MickLesk](https://github.com/MickLesk) ([#13473](https://github.com/community-scripts/ProxmoxVE/pull/13473))
## 2026-04-01
### 🆕 New Scripts
- DrawDB ([#13454](https://github.com/community-scripts/ProxmoxVE/pull/13454))
### 🧰 Tools
- #### 🐞 Bug Fixes
- Filebrowser: make noauth setup use correct database [@MickLesk](https://github.com/MickLesk) ([#13457](https://github.com/community-scripts/ProxmoxVE/pull/13457))
## 2026-03-31
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Graylog: set vm.max_map_count on host for OpenSearch [@MickLesk](https://github.com/MickLesk) ([#13441](https://github.com/community-scripts/ProxmoxVE/pull/13441))
- Koillection: ensure newline before appending to .env.local [@MickLesk](https://github.com/MickLesk) ([#13440](https://github.com/community-scripts/ProxmoxVE/pull/13440))
### 💾 Core
- #### 🔧 Refactor
- core: skip empty gateway value in network config [@MickLesk](https://github.com/MickLesk) ([#13442](https://github.com/community-scripts/ProxmoxVE/pull/13442))
## 2026-03-30
### 🆕 New Scripts
- Bambuddy ([#13411](https://github.com/community-scripts/ProxmoxVE/pull/13411))
### 🚀 Updated Scripts
- #### 💥 Breaking Changes
- Rename: BirdNET > BirdNET-Go [@MickLesk](https://github.com/MickLesk) ([#13410](https://github.com/community-scripts/ProxmoxVE/pull/13410))
## 2026-03-29
### 🆕 New Scripts
- YOURLS ([#13379](https://github.com/community-scripts/ProxmoxVE/pull/13379))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- fix(victoriametrics): use jq to filter releases [@Joery-M](https://github.com/Joery-M) ([#13393](https://github.com/community-scripts/ProxmoxVE/pull/13393))
- Ollama: add error handling for Intel GPG key imports [@MickLesk](https://github.com/MickLesk) ([#13397](https://github.com/community-scripts/ProxmoxVE/pull/13397))
- Immich: ignore Redis connection error on maintenance mode disable [@MickLesk](https://github.com/MickLesk) ([#13398](https://github.com/community-scripts/ProxmoxVE/pull/13398))
- NPM: unmask openresty after migration from package [@MickLesk](https://github.com/MickLesk) ([#13399](https://github.com/community-scripts/ProxmoxVE/pull/13399))
## 2026-03-28
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Fix: Update gokapi binary name for v2.2.4+ and add migration step [@krazos](https://github.com/krazos) ([#13377](https://github.com/community-scripts/ProxmoxVE/pull/13377))
- Fix: update gokapi asset matching for v2.2.4+ naming convention [@krazos](https://github.com/krazos) ([#13369](https://github.com/community-scripts/ProxmoxVE/pull/13369))
- Tandoor Recipes: Add missing env variable [@tremor021](https://github.com/tremor021) ([#13365](https://github.com/community-scripts/ProxmoxVE/pull/13365))
- #### ✨ New Features
- FileFlows: add option to install Node [@tremor021](https://github.com/tremor021) ([#13368](https://github.com/community-scripts/ProxmoxVE/pull/13368))
## 2026-03-27
### 🆕 New Scripts
- Matter-Server ([#13355](https://github.com/community-scripts/ProxmoxVE/pull/13355))
- GeoPulse ([#13320](https://github.com/community-scripts/ProxmoxVE/pull/13320))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- RevealJS: Switch from gulp to vite [@tremor021](https://github.com/tremor021) ([#13336](https://github.com/community-scripts/ProxmoxVE/pull/13336))
- #### ✨ New Features
- Dispatcharr add custom Postgres port support for upgrade [@MickLesk](https://github.com/MickLesk) ([#13347](https://github.com/community-scripts/ProxmoxVE/pull/13347))
- Immich: bump to v2.6.3 [@MickLesk](https://github.com/MickLesk) ([#13324](https://github.com/community-scripts/ProxmoxVE/pull/13324))
### 🧰 Tools
- #### ✨ New Features
- Refactor/Feature-Bump/Security: Update-Cron-LXCs (Now Local Mode!) [@MickLesk](https://github.com/MickLesk) ([#13339](https://github.com/community-scripts/ProxmoxVE/pull/13339))
- PVE LXC-Updater: pipe apt list through cat to prevent pager hang [@MickLesk](https://github.com/MickLesk) ([#13501](https://github.com/community-scripts/ProxmoxVE/pull/13501))

View File

@@ -21,8 +21,6 @@ catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /usr/local/bin/ironclaw ]]; then
msg_error "No ${APP} Installation Found!"

View File

@@ -22,8 +22,6 @@ catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /etc/ntfy ]]; then
msg_error "No ${APP} Installation Found!"
exit

View File

@@ -21,7 +21,6 @@ catch_errors
function update_script() {
header_info
check_container_resources
if [[ ! -d /opt/redlib ]]; then
msg_error "No ${APP} Installation Found!"

View File

@@ -21,8 +21,6 @@ catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if ! apk info -e rustypaste >/dev/null 2>&1; then
msg_error "No ${APP} Installation Found!"

View File

@@ -0,0 +1,6 @@
____ __ __ ___ _ __ ____ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) / / __ )_____(_)___/ /___ ____
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /_____/ __ / ___/ / __ / __ `/ _ \
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /_____/ /_/ / / / / /_/ / /_/ / __/
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/ /_____/_/ /_/\__,_/\__, /\___/
/____/

6
ct/headers/soulsync Normal file
View File

@@ -0,0 +1,6 @@
_____ _______
/ ___/____ __ __/ / ___/__ ______ _____
\__ \/ __ \/ / / / /\__ \/ / / / __ \/ ___/
___/ / /_/ / /_/ / /___/ / /_/ / / / / /__
/____/\____/\__,_/_//____/\__, /_/ /_/\___/
/____/

6
ct/headers/teable Normal file
View File

@@ -0,0 +1,6 @@
______ __ __
/_ __/__ ____ _/ /_ / /__
/ / / _ \/ __ `/ __ \/ / _ \
/ / / __/ /_/ / /_/ / / __/
/_/ \___/\__,_/_.___/_/\___/

6
ct/headers/tubearchivist Normal file
View File

@@ -0,0 +1,6 @@
______ __ ___ __ _ _ __
/_ __/_ __/ /_ ___ / | __________/ /_ (_) __(_)____/ /_
/ / / / / / __ \/ _ \ / /| | / ___/ ___/ __ \/ / | / / / ___/ __/
/ / / /_/ / /_/ / __/ / ___ |/ / / /__/ / / / /| |/ / (__ ) /_
/_/ \__,_/_.___/\___/ /_/ |_/_/ \___/_/ /_/_/ |___/_/____/\__/

View File

@@ -38,13 +38,15 @@ function update_script() {
mv /opt/hortusfox/ /opt/hortusfox-backup
msg_ok "Backed up current HortusFox installation"
fetch_and_deploy_gh_release "hortusfox" "danielbrendel/hortusfox-web" "tarball"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "hortusfox" "danielbrendel/hortusfox-web" "tarball"
msg_info "Updating HortusFox"
cd /opt/hortusfox
mv /opt/hortusfox-backup/.env /opt/hortusfox/.env
cp /opt/hortusfox-backup/.env /opt/hortusfox/.env
cp -a /opt/hortusfox-backup/public/img/. /opt/hortusfox/public/img/
export COMPOSER_ALLOW_SUPERUSER=1
$STD composer install --no-dev --optimize-autoloader
$STD php asatru migrate --no-interaction
$STD php asatru migrate:upgrade
$STD php asatru plants:attributes
$STD php asatru calendar:classes
chown -R www-data:www-data /opt/hortusfox

View File

@@ -52,6 +52,7 @@ function update_script() {
$STD make install-webconf
$STD a2enmod rewrite
$STD a2enmod cgi
setcap cap_net_raw+p /bin/ping
msg_ok "Built Nagios Core"
msg_info "Starting Nagios"

View File

@@ -6,6 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://pangolin.net/ | Github: https://github.com/fosrl/pangolin
APP="Pangolin"
PANGOLIN_VERSION="${PANGOLIN_VERSION:-1.18.2}"
var_tags="${var_tags:-proxy}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
@@ -33,7 +34,7 @@ function update_script() {
NODE_VERSION="24" setup_nodejs
if check_for_gh_release "pangolin" "fosrl/pangolin"; then
if check_for_gh_release "pangolin" "fosrl/pangolin" "$PANGOLIN_VERSION" "Pinned to a tested release because Pangolin's schema changes have repeatedly broken unattended updates. To try a newer version at your own risk, run: 'export PANGOLIN_VERSION=<tag>' and re-run update. If it breaks, please open an issue at https://github.com/community-scripts/ProxmoxVE/issues with the error log."; then
msg_info "Stopping Service"
systemctl stop pangolin
systemctl stop gerbil
@@ -41,9 +42,13 @@ function update_script() {
msg_info "Creating backup"
tar -czf /opt/pangolin_config_backup.tar.gz -C /opt/pangolin config
if [[ -f /opt/pangolin/config/db/db.sqlite ]]; then
cp -a /opt/pangolin/config/db/db.sqlite \
"/opt/pangolin/config/db/db.sqlite.pre-${PANGOLIN_VERSION}-$(date +%Y%m%d-%H%M%S).bak"
fi
msg_ok "Created backup"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" "$PANGOLIN_VERSION"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64"
msg_info "Updating Pangolin"
@@ -67,36 +72,16 @@ function update_script() {
rm -f /opt/pangolin_config_backup.tar.gz
msg_ok "Restored config"
msg_info "Running database migrations"
cd /opt/pangolin
# Pre-apply potentially destructive schema changes safely so drizzle-kit
# does not recreate tables (which would delete all rows).
local DB="/opt/pangolin/config/db/db.sqlite"
if [[ -f "$DB" ]]; then
sqlite3 "$DB" "ALTER TABLE 'orgs' ADD COLUMN 'settingsLogRetentionDaysConnection' integer DEFAULT 0 NOT NULL;" 2>/dev/null || true
sqlite3 "$DB" "ALTER TABLE 'clientSitesAssociationsCache' ADD COLUMN 'isJitMode' integer DEFAULT 0 NOT NULL;" 2>/dev/null || true
sqlite3 "$DB" "ALTER TABLE 'userOrgs' ADD COLUMN 'pamUsername' text;" 2>/dev/null || true
# Create new role-mapping tables and migrate data before drizzle-kit
# drops the roleId columns from userOrgs and userInvites.
sqlite3 "$DB" "CREATE TABLE IF NOT EXISTS 'userOrgRoles' (
'userId' text NOT NULL REFERENCES 'user'('id') ON DELETE CASCADE,
'orgId' text NOT NULL REFERENCES 'orgs'('orgId') ON DELETE CASCADE,
'roleId' integer NOT NULL REFERENCES 'roles'('roleId') ON DELETE CASCADE,
UNIQUE('userId', 'orgId', 'roleId')
);" 2>/dev/null || true
sqlite3 "$DB" "INSERT OR IGNORE INTO 'userOrgRoles' (userId, orgId, roleId) SELECT userId, orgId, roleId FROM 'userOrgs' WHERE roleId IS NOT NULL;" 2>/dev/null || true
sqlite3 "$DB" "CREATE TABLE IF NOT EXISTS 'userInviteRoles' (
'inviteId' text NOT NULL REFERENCES 'userInvites'('inviteId') ON DELETE CASCADE,
'roleId' integer NOT NULL REFERENCES 'roles'('roleId') ON DELETE CASCADE,
PRIMARY KEY('inviteId', 'roleId')
);" 2>/dev/null || true
sqlite3 "$DB" "INSERT OR IGNORE INTO 'userInviteRoles' (inviteId, roleId) SELECT inviteId, roleId FROM 'userInvites' WHERE roleId IS NOT NULL;" 2>/dev/null || true
if ! grep -q '^ExecStartPre=/usr/bin/node dist/migrations.mjs' /etc/systemd/system/pangolin.service 2>/dev/null; then
msg_info "Adding migration step to pangolin.service"
sed -i '/^ExecStart=\/usr\/bin\/node --enable-source-maps dist\/server.mjs/i ExecStartPre=/usr/bin/node dist/migrations.mjs' /etc/systemd/system/pangolin.service
systemctl daemon-reload
msg_ok "Updated pangolin.service"
fi
ENVIRONMENT=prod $STD npx drizzle-kit push --force --config drizzle.sqlite.config.ts
msg_info "Running database migrations"
cd /opt/pangolin
ENVIRONMENT=prod $STD node dist/migrations.mjs
msg_ok "Ran database migrations"
msg_info "Updating Badger plugin version"

View File

@@ -45,6 +45,33 @@ function update_script() {
msg_ok "Fixed entrypoint"
fi
if [[ ! -f /etc/peanut/peanut.env ]]; then
msg_info "Migrating service to EnvironmentFile"
mkdir -p /etc/peanut
cat <<EOF >/etc/peanut/peanut.env
NODE_ENV=production
#WEB_HOST=0.0.0.0
#WEB_PORT=8080
#NUT_HOST=localhost
#NUT_PORT=3493
# Disable auth entirely:
#AUTH_DISABLED=true
# Bootstrap initial account on first start (ignored afterwards):
#WEB_USERNAME=admin
#WEB_PASSWORD=changeme
EOF
chmod 600 /etc/peanut/peanut.env
sed -i '/^Environment=/d' /etc/systemd/system/peanut.service
if ! grep -q '^EnvironmentFile=/etc/peanut/peanut.env' /etc/systemd/system/peanut.service; then
sed -i '/^Type=simple/a EnvironmentFile=/etc/peanut/peanut.env' /etc/systemd/system/peanut.service
fi
systemctl daemon-reload
msg_ok "Migrated to /etc/peanut/peanut.env"
fi
msg_info "Updating PeaNUT"
cd /opt/peanut
$STD pnpm i

79
ct/protonmail-bridge.sh Normal file
View File

@@ -0,0 +1,79 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Stephen Chin (steveonjava)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/ProtonMail/proton-bridge
# Default container resources; each var_* honours a pre-set environment
# override via the ":-" fallback.
APP="ProtonMail-Bridge"
var_tags="${var_tags:-mail;proton}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# Standard framework bootstrap (helpers are provided by build.func, sourced above).
header_info "$APP"
variables
color
catch_errors
function update_script() {
  # In-place upgrade of an existing Proton Mail Bridge container.
  # Records which bridge units were running, stops the whole stack, deploys
  # the new release, and restarts only the previously-active units — and only
  # if first-time configuration (the marker file) has been completed.
  header_info
  check_container_storage
  check_container_resources
  if [[ ! -x /usr/bin/protonmail-bridge ]]; then
    msg_error "No ${APP} Installation Found!"
    exit 1
  fi
  if check_for_gh_release "protonmail-bridge" "ProtonMail/proton-bridge"; then
    # Every unit that makes up the bridge stack (daemon, sockets, proxies).
    local -a bridge_units=(
      protonmail-bridge
      protonmail-bridge-imap.socket
      protonmail-bridge-smtp.socket
      protonmail-bridge-imap-proxy
      protonmail-bridge-smtp-proxy
    )
    local svc
    local -a active_before=()
    # Remember which units are currently running so we restore exactly that set.
    for svc in "${bridge_units[@]}"; do
      systemctl is-active --quiet "$svc" 2>/dev/null && active_before+=("$svc")
    done
    msg_info "Stopping Services"
    systemctl stop protonmail-bridge-imap.socket protonmail-bridge-smtp.socket protonmail-bridge-imap-proxy protonmail-bridge-smtp-proxy protonmail-bridge
    msg_ok "Stopped Services"
    fetch_and_deploy_gh_release "protonmail-bridge" "ProtonMail/proton-bridge" "binary"
    if [[ -f /home/protonbridge/.protonmailbridge-initialized ]]; then
      msg_info "Starting Services"
      for svc in "${active_before[@]}"; do
        systemctl start "$svc"
      done
      msg_ok "Started Services"
    else
      # First-time setup not done yet: everything stays down until
      # protonmailbridge-configure has been run.
      msg_ok "Initialization not completed. Services remain disabled."
    fi
    msg_ok "Updated successfully!"
  fi
  exit
}
# Create and launch the container, then print one-time setup instructions:
# Bridge services stay disabled until protonmailbridge-configure is run inside.
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW}One-time configuration is required before Bridge services are enabled.${CL}"
echo -e "${INFO}${YW}Run this command in the container: protonmailbridge-configure${CL}"

View File

@@ -30,7 +30,7 @@ function update_script() {
fi
NODE_VERSION="24" setup_nodejs
PYTHON_VERSION="3.12" setup_uv
PYTHON_VERSION="3.14" setup_uv
if check_for_gh_release "shelfmark" "calibrain/shelfmark"; then
msg_info "Stopping Service(s)"
@@ -59,6 +59,7 @@ function update_script() {
RELEASE_VERSION=$(cat "$HOME/.shelfmark")
msg_info "Updating Shelfmark"
export VIRTUAL_ENV=/opt/shelfmark/venv
sed -i "s/^RELEASE_VERSION=.*/RELEASE_VERSION=$RELEASE_VERSION/" /etc/shelfmark/.env
cd /opt/shelfmark/src/frontend
$STD npm ci
@@ -67,9 +68,10 @@ function update_script() {
cd /opt/shelfmark
$STD uv venv -c ./venv
$STD source ./venv/bin/activate
$STD uv pip install -r ./requirements-base.txt
if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then
$STD uv pip install -r ./requirements-shelfmark.txt
$STD uv sync --active --locked --no-default-groups --extra browser
else
$STD uv sync --active --locked --no-default-groups
fi
mv /opt/start.sh.bak /opt/shelfmark/start.sh
msg_ok "Updated Shelfmark"

68
ct/soulsync.sh Normal file
View File

@@ -0,0 +1,68 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/Nezreka/SoulSync
# Default container resources; each var_* honours a pre-set environment
# override via the ":-" fallback.
APP="SoulSync"
var_tags="${var_tags:-music;automation;media}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# Standard framework bootstrap (helpers are provided by build.func, sourced above).
header_info "$APP"
variables
color
catch_errors
function update_script() {
  # In-place upgrade of an existing SoulSync container: preserves config/data
  # across a clean re-deploy of /opt/soulsync and rebuilds the Python venv.
  header_info
  check_container_storage
  check_container_resources
  if [[ ! -f ~/.soulsync ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi
  if check_for_gh_release "soulsync" "Nezreka/SoulSync"; then
    msg_info "Stopping Service"
    systemctl stop soulsync
    msg_ok "Stopped Service"
    # Keep user config/data out of the way while CLEAN_INSTALL wipes /opt/soulsync.
    msg_info "Backing up Data"
    mv /opt/soulsync/config /opt/soulsync-config.bak
    mv /opt/soulsync/data /opt/soulsync-data.bak
    msg_ok "Backed up Data"
    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "soulsync" "Nezreka/SoulSync" "tarball"
    msg_info "Updating Python Dependencies"
    cd /opt/soulsync
    $STD uv venv --clear /opt/soulsync/.venv --python 3.11
    # Fix: venvs created by 'uv venv' ship without pip, so invoking
    # /opt/soulsync/.venv/bin/pip fails. Install through 'uv pip' against the
    # venv interpreter instead — same pattern as soulsync-install.sh.
    $STD uv pip install -r requirements.txt --python /opt/soulsync/.venv/bin/python
    msg_ok "Updated Python Dependencies"
    mv /opt/soulsync-config.bak /opt/soulsync/config
    mv /opt/soulsync-data.bak /opt/soulsync/data
    msg_info "Starting Service"
    systemctl start soulsync
    msg_ok "Started Service"
    msg_ok "Updated ${APP}"
  fi
  exit
}
# Create and launch the container, then print the web UI address.
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8008${CL}"

View File

@@ -30,6 +30,12 @@ function update_script() {
msg_info "Updating step-ca and step-cli"
$STD apt update
$STD apt upgrade -y step-ca step-cli
# Patch for making $STD happy (/usr/bin/step is a symlink to /usr/bin/step-cli)
STEPBIN="$(which step)"
rm -f "$STEPBIN"
cp -f "$(which step-cli)" "$STEPBIN"
$STD systemctl restart step-ca
msg_ok "Updated step-ca and step-cli"

82
ct/teable.sh Normal file
View File

@@ -0,0 +1,82 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/teableio/teable
# Default container resources; each var_* honours a pre-set environment
# override via the ":-" fallback. RAM/disk are sized for the pnpm monorepo build.
APP="Teable"
var_tags="${var_tags:-database;no-code;spreadsheet}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-10240}"
var_disk="${var_disk:-25}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# Standard framework bootstrap (helpers are provided by build.func, sourced above).
header_info "$APP"
variables
color
catch_errors
function update_script() {
  # In-place upgrade of an existing Teable container: preserves .env across a
  # clean re-deploy, rebuilds the pnpm monorepo, and applies Prisma migrations.
  header_info
  check_container_storage
  check_container_resources
  local app_dir="/opt/teable"
  local env_bak="/opt/teable.env.bak"
  if [[ ! -d "$app_dir" ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi
  if check_for_gh_release "teable" "teableio/teable"; then
    msg_info "Stopping Service"
    systemctl stop teable
    msg_ok "Stopped Service"
    # Stash .env outside the tree before CLEAN_INSTALL wipes /opt/teable.
    msg_info "Backing up Configuration"
    cp "$app_dir/.env" "$env_bak"
    msg_ok "Backed up Configuration"
    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "teable" "teableio/teable" "tarball"
    msg_info "Restoring Configuration"
    mv "$env_bak" "$app_dir/.env"
    msg_ok "Restored Configuration"
    msg_info "Rebuilding Teable"
    cd "$app_dir"
    local teable_version
    teable_version="$(cat ~/.teable)"
    echo "NEXT_PUBLIC_BUILD_VERSION=\"${teable_version}\"" >>apps/nextjs-app/.env
    # HUSKY=0 skips git hooks; the Next.js build needs a large V8 heap.
    export HUSKY=0 NODE_OPTIONS="--max-old-space-size=8192"
    $STD pnpm install --frozen-lockfile
    $STD pnpm -F @teable/db-main-prisma prisma-generate --schema ./prisma/postgres/schema.prisma
    NODE_ENV=production NEXT_BUILD_ENV_TYPECHECK=false \
      $STD pnpm -r --filter '!playground' run build
    msg_ok "Rebuilt Teable"
    msg_info "Running Database Migrations"
    # .env supplies the database connection used by prisma-migrate.
    source "$app_dir/.env"
    $STD pnpm -F @teable/db-main-prisma prisma-migrate deploy --schema ./prisma/postgres/schema.prisma
    msg_ok "Ran Database Migrations"
    msg_info "Starting Service"
    systemctl start teable
    msg_ok "Started Service"
    msg_ok "Updated successfully!"
  else
    msg_ok "No update available."
  fi
  exit
}
# Create and launch the container, then print the web UI address.
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

View File

@@ -29,12 +29,12 @@ function update_script() {
exit
fi
if check_for_gh_release "threadfin" "threadfin/threadfin"; then
if check_for_gh_release "threadfin-app" "threadfin/threadfin"; then
msg_info "Stopping Service"
systemctl stop threadfin
msg_ok "Stopped Service"
fetch_and_deploy_gh_release "threadfin" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64"
fetch_and_deploy_gh_release "threadfin-app" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64"
msg_info "Starting Service"
systemctl start threadfin

83
ct/tubearchivist.sh Normal file
View File

@@ -0,0 +1,83 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/tubearchivist/tubearchivist
# Default container resources; each var_* honours a pre-set environment
# override via the ":-" fallback. Disk is sized for downloaded media.
APP="Tube Archivist"
var_tags="${var_tags:-media;youtube;archiving}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-6144}"
var_disk="${var_disk:-30}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# Standard framework bootstrap (helpers are provided by build.func, sourced above).
header_info "$APP"
variables
color
catch_errors
function update_script() {
  # In-place upgrade of an existing Tube Archivist container: preserves .env
  # across a clean re-deploy, rebuilds frontend + backend deps, and rewires
  # the /cache and /youtube symlinks expected by the runtime configuration.
  header_info
  check_container_storage
  check_container_resources
  local base="/opt/tubearchivist"
  if [[ ! -d "$base" ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi
  if check_for_gh_release "tubearchivist" "tubearchivist/tubearchivist"; then
    msg_info "Stopping Services"
    systemctl stop tubearchivist tubearchivist-celery tubearchivist-beat
    msg_ok "Stopped Services"
    # Stash .env outside the tree before CLEAN_INSTALL wipes /opt/tubearchivist.
    msg_info "Backing up Data"
    cp "$base/.env" /opt/tubearchivist_env.bak
    msg_ok "Backed up Data"
    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "tubearchivist" "tubearchivist/tubearchivist" "tarball"
    msg_info "Rebuilding Tube Archivist"
    cd "$base/frontend"
    $STD npm install
    $STD npm run build:deploy
    # Serve the built frontend from Django's static dir.
    mkdir -p "$base/backend/static"
    cp -r "$base"/frontend/dist/* "$base/backend/static/"
    cp "$base/docker_assets/backend_start.py" "$base/backend/"
    local venv_py="$base/.venv/bin/python"
    $STD uv pip install --python "$venv_py" -r "$base/backend/requirements.txt"
    if [[ -f "$base/backend/requirements.plugins.txt" ]]; then
      # Optional yt-dlp plugins installed into their own target dir.
      mkdir -p /opt/yt_plugins/bgutil
      $STD uv pip install --python "$venv_py" --target /opt/yt_plugins/bgutil -r "$base/backend/requirements.plugins.txt"
    fi
    msg_ok "Rebuilt Tube Archivist"
    msg_info "Restoring Configuration"
    mv /opt/tubearchivist_env.bak "$base/.env"
    # Rewrite legacy paths in .env to the layout this release expects.
    sed -i \
      -e 's|^TA_APP_DIR=/opt/tubearchivist$|TA_APP_DIR=/opt/tubearchivist/backend|' \
      -e 's|^TA_CACHE_DIR=/opt/tubearchivist/cache$|TA_CACHE_DIR=/cache|' \
      -e 's|^TA_MEDIA_DIR=/opt/tubearchivist/media$|TA_MEDIA_DIR=/youtube|' \
      "$base/.env"
    ln -sf "$base/cache" /cache
    ln -sf "$base/media" /youtube
    ln -sf "$base/.env" "$base/backend/.env"
    msg_ok "Restored Configuration"
    msg_info "Starting Services"
    systemctl start tubearchivist tubearchivist-celery tubearchivist-beat
    systemctl reload nginx
    msg_ok "Started Services"
    msg_ok "Updated successfully!"
  fi
  exit
}
# Create and launch the container, then print the web UI address.
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"

View File

@@ -43,8 +43,6 @@ cd /tmp/immichframe/immichFrame.Web
$STD npm ci
$STD npm run build
cp -r build/* /opt/immichframe/wwwroot
$STD apt remove -y dotnet-sdk-8.0
$STD apt autoremove -y
rm -rf /tmp/immichframe
mkdir -p /opt/immichframe/Config
curl -fsSL "https://raw.githubusercontent.com/immichFrame/ImmichFrame/main/docker/Settings.example.yml" -o /opt/immichframe/Config/Settings.yml

View File

@@ -59,6 +59,7 @@ $STD ./tools/setup
$STD ./configure
$STD make
$STD make install
setcap cap_net_raw+p /bin/ping
msg_ok "Built Nagios Plugins"
msg_info "Configuring Web Authentication"

View File

@@ -22,7 +22,8 @@ $STD apt install -y \
msg_ok "Installed Dependencies"
NODE_VERSION="24" setup_nodejs
fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball"
PANGOLIN_VERSION="${PANGOLIN_VERSION:-1.18.2}"
fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" "$PANGOLIN_VERSION"
fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64"
fetch_and_deploy_gh_release "traefik" "traefik/traefik" "prebuild" "latest" "/usr/bin" "traefik_v*_linux_amd64.tar.gz"
@@ -204,6 +205,7 @@ User=root
Environment=NODE_ENV=production
Environment=ENVIRONMENT=prod
WorkingDirectory=/opt/pangolin
ExecStartPre=/usr/bin/node dist/migrations.mjs
ExecStart=/usr/bin/node --enable-source-maps dist/server.mjs
Restart=always
RestartSec=10

View File

@@ -29,13 +29,28 @@ cp -r .next/static .next/standalone/.next/
mkdir -p /opt/peanut/.next/standalone/config
mkdir -p /etc/peanut/
ln -sf .next/standalone/server.js server.js
cat <<EOF >/etc/peanut/settings.yml
WEB_HOST: 0.0.0.0
WEB_PORT: 8080
NUT_HOST: 0.0.0.0
NUT_PORT: 3493
if [[ ! -f /etc/peanut/settings.yml ]]; then
cat <<EOF >/etc/peanut/settings.yml
NUT_SERVERS: []
EOF
fi
ln -sf /etc/peanut/settings.yml /opt/peanut/.next/standalone/config/settings.yml
cat <<EOF >/etc/peanut/peanut.env
NODE_ENV=production
#WEB_HOST=0.0.0.0
#WEB_PORT=8080
#NUT_HOST=localhost
#NUT_PORT=3493
# Disable auth entirely:
#AUTH_DISABLED=true
# Bootstrap initial account on first start (ignored afterwards):
#WEB_USERNAME=admin
#WEB_PASSWORD=changeme
EOF
chmod 600 /etc/peanut/peanut.env
msg_ok "Setup Peanut"
msg_info "Creating Service"
@@ -48,11 +63,7 @@ SyslogIdentifier=peanut
Restart=always
RestartSec=5
Type=simple
Environment="NODE_ENV=production"
#Environment="NUT_HOST=localhost"
#Environment="NUT_PORT=3493"
#Environment="WEB_HOST=0.0.0.0"
#Environment="WEB_PORT=8080"
EnvironmentFile=/etc/peanut/peanut.env
WorkingDirectory=/opt/peanut
ExecStart=node /opt/peanut/entrypoint.mjs
TimeoutStopSec=30

View File

@@ -0,0 +1,192 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Stephen Chin (steveonjava)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/ProtonMail/proton-bridge
# Install-side script: runs INSIDE the new container; helpers are injected
# by the framework via FUNCTIONS_FILE_PATH.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
# 'pass' is the keychain backend the bridge requires on Linux (see the
# protonmailbridge-configure helper below, which initializes it).
msg_info "Installing Dependencies"
$STD apt install -y pass
msg_ok "Installed Dependencies"
# Dedicated unprivileged user; the bridge daemon never runs as root.
msg_info "Creating Service User"
useradd -r -m -d /home/protonbridge -s /usr/sbin/nologin protonbridge
install -d -m 0750 -o protonbridge -g protonbridge /home/protonbridge
msg_ok "Created Service User"
fetch_and_deploy_gh_release "protonmail-bridge" "ProtonMail/proton-bridge" "binary"
# Bridge daemon plus socket-activated proxies. All units are gated on the
# .protonmailbridge-initialized marker, so nothing starts before first-time
# configuration has been completed via protonmailbridge-configure.
msg_info "Creating Services"
# Headless bridge daemon running as the unprivileged service user.
cat <<EOF >/etc/systemd/system/protonmail-bridge.service
[Unit]
Description=Proton Mail Bridge (noninteractive)
After=network-online.target
Wants=network-online.target
ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized
[Service]
Type=simple
User=protonbridge
Group=protonbridge
WorkingDirectory=/home/protonbridge
Environment=HOME=/home/protonbridge
ExecStart=/usr/bin/protonmail-bridge --noninteractive
Restart=always
RestartSec=3
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=full
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
[Install]
WantedBy=multi-user.target
EOF
# IMAP: expose standard port 143 externally, proxied to the bridge's
# loopback-only listener on 1143.
cat <<'EOF' >/etc/systemd/system/protonmail-bridge-imap.socket
[Unit]
Description=Proton Mail Bridge IMAP Socket (143)
ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized
[Socket]
ListenStream=143
Accept=no
Service=protonmail-bridge-imap-proxy.service
[Install]
WantedBy=sockets.target
EOF
cat <<'EOF' >/etc/systemd/system/protonmail-bridge-imap-proxy.service
[Unit]
Description=Proton Mail Bridge IMAP Proxy (143 -> 127.0.0.1:1143)
After=protonmail-bridge.service
Requires=protonmail-bridge.service
ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized
[Service]
Type=simple
Sockets=protonmail-bridge-imap.socket
ExecStart=/usr/lib/systemd/systemd-socket-proxyd 127.0.0.1:1143
NoNewPrivileges=yes
PrivateTmp=yes
EOF
# SMTP: expose submission port 587 externally, proxied to the bridge's
# loopback-only listener on 1025.
cat <<'EOF' >/etc/systemd/system/protonmail-bridge-smtp.socket
[Unit]
Description=Proton Mail Bridge SMTP Socket (587)
ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized
[Socket]
ListenStream=587
Accept=no
Service=protonmail-bridge-smtp-proxy.service
[Install]
WantedBy=sockets.target
EOF
cat <<'EOF' >/etc/systemd/system/protonmail-bridge-smtp-proxy.service
[Unit]
Description=Proton Mail Bridge SMTP Proxy (587 -> 127.0.0.1:1025)
After=protonmail-bridge.service
Requires=protonmail-bridge.service
ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized
[Service]
Type=simple
Sockets=protonmail-bridge-smtp.socket
ExecStart=/usr/lib/systemd/systemd-socket-proxyd 127.0.0.1:1025
NoNewPrivileges=yes
PrivateTmp=yes
EOF
msg_ok "Created Services"
msg_info "Creating Helper Commands"
# One-shot configuration helper. Quoted 'EOF' delimiter: the body below is
# written verbatim (no expansion here) and runs inside the container on demand.
# On first run it bootstraps a passphrase-less GPG key and a 'pass' store for
# the service user (the bridge's Linux keychain), launches the interactive
# bridge CLI, then drops the marker file that un-gates the systemd units.
cat <<'EOF' >/usr/local/bin/protonmailbridge-configure
#!/usr/bin/env bash
set -euo pipefail
BRIDGE_USER="protonbridge"
BRIDGE_HOME="/home/${BRIDGE_USER}"
GNUPG_HOME="${BRIDGE_HOME}/.gnupg"
MARKER="${BRIDGE_HOME}/.protonmailbridge-initialized"
FIRST_TIME=0
if [[ ! -f "${MARKER}" ]]; then
FIRST_TIME=1
fi
# Stop sockets/proxies/bridge daemon before configuration
systemctl stop protonmail-bridge-imap.socket protonmail-bridge-smtp.socket
systemctl stop protonmail-bridge-imap-proxy protonmail-bridge-smtp-proxy protonmail-bridge
if [[ "${FIRST_TIME}" == "1" ]]; then
echo "First-time setup: initializing pass keychain for ${BRIDGE_USER} (required by Proton Mail Bridge on Linux)."
install -d -m 0700 -o "${BRIDGE_USER}" -g "${BRIDGE_USER}" "${GNUPG_HOME}"
FPR="$(runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \
gpg --list-secret-keys --with-colons 2>/dev/null | awk -F: '$1=="fpr"{print $10; exit}')"
if [[ -z "${FPR}" ]]; then
runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \
gpg --batch --pinentry-mode loopback --passphrase '' \
--quick-gen-key 'ProtonMail Bridge' default default never
FPR="$(runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \
gpg --list-secret-keys --with-colons 2>/dev/null | awk -F: '$1=="fpr"{print $10; exit}')"
fi
if [[ -z "${FPR}" ]]; then
echo "Failed to detect a GPG key fingerprint for ${BRIDGE_USER}." >&2
exit 1
fi
runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \
pass init "${FPR}"
echo
echo "To do initial configuration of the Proton Mail Bridge:"
echo "Run: login"
echo "Run: info"
echo "Run: exit"
echo
else
echo
echo "Launching Proton Mail Bridge CLI for configuration."
echo "External access is disabled until you exit."
echo "Run: exit"
echo
fi
runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" \
protonmail-bridge -c
if [[ "${FIRST_TIME}" == "1" ]]; then
touch "${MARKER}"
chown "${BRIDGE_USER}:${BRIDGE_USER}" "${MARKER}"
chmod 0644 "${MARKER}"
fi
systemctl enable -q --now protonmail-bridge.service protonmail-bridge-imap.socket protonmail-bridge-smtp.socket
if [[ "${FIRST_TIME}" == "1" ]]; then
echo "Initialization complete. Services enabled and started."
else
echo "Configuration complete. Services enabled and started."
fi
EOF
chmod +x /usr/local/bin/protonmailbridge-configure
# Convenience alias on the default PATH of login shells.
ln -sf /usr/local/bin/protonmailbridge-configure /usr/bin/protonmailbridge-configure
msg_ok "Created Helper Commands"
# Standard framework epilogue.
motd_ssh
customize
cleanup_lxc

View File

@@ -116,7 +116,7 @@ else
fi
NODE_VERSION="24" setup_nodejs
PYTHON_VERSION="3.12" setup_uv
PYTHON_VERSION="3.14" setup_uv
fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark"
RELEASE_VERSION=$(cat "$HOME/.shelfmark")
@@ -130,11 +130,15 @@ mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist
msg_ok "Built Shelfmark frontend"
msg_info "Configuring Shelfmark"
export VIRTUAL_ENV=/opt/shelfmark/venv
cd /opt/shelfmark
$STD uv venv --clear ./venv
$STD source ./venv/bin/activate
$STD uv pip install -r ./requirements-base.txt
[[ "$DEPLOYMENT_TYPE" == "1" ]] && $STD uv pip install -r ./requirements-shelfmark.txt
if [[ "$DEPLOYMENT_TYPE" == "1" ]]; then
$STD uv sync --active --locked --no-default-groups --extra browser
else
$STD uv sync --active --locked --no-default-groups
fi
mkdir -p {/var/log/shelfmark,/tmp/shelfmark}
msg_ok "Configured Shelfmark"

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/Nezreka/SoulSync
# Install-side script: runs INSIDE the new container; helpers are injected
# by the framework via FUNCTIONS_FILE_PATH.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
# Build toolchain for native Python wheels plus audio tooling
# (chromaprint fingerprinting, ffmpeg transcoding).
msg_info "Installing Dependencies"
$STD apt install -y \
  gcc \
  libffi-dev \
  libssl-dev \
  libchromaprint-tools \
  ffmpeg
msg_ok "Installed Dependencies"
UV_PYTHON="3.11" setup_uv
fetch_and_deploy_gh_release "soulsync" "Nezreka/SoulSync" "tarball"
# Create the app venv and install dependencies through uv ('uv venv' environments
# have no pip of their own, hence 'uv pip … --python <venv interpreter>').
msg_info "Setting up Application"
cd /opt/soulsync
$STD uv venv /opt/soulsync/.venv --python 3.11
$STD uv pip install -r requirements.txt --python /opt/soulsync/.venv/bin/python
mkdir -p /opt/soulsync/{config,data,logs}
msg_ok "Set up Application"
msg_info "Creating Service"
# systemd unit for the SoulSync web server; DATABASE_PATH points into the
# persistent data dir created above.
cat <<EOF >/etc/systemd/system/soulsync.service
[Unit]
Description=SoulSync Music Discovery
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/soulsync
ExecStart=/opt/soulsync/.venv/bin/python web_server.py
Environment=PYTHONPATH=/opt/soulsync PYTHONUNBUFFERED=1 DATABASE_PATH=/opt/soulsync/data/music_library.db
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now soulsync
msg_ok "Created Service"
# Standard framework epilogue.
motd_ssh
customize
cleanup_lxc

View File

@@ -23,21 +23,34 @@ setup_deb822_repo \
msg_info "Installing step-ca and step-cli"
$STD apt install -y step-ca step-cli
STEPHOME="/root/.step"
export STEPPATH=/etc/step-ca
STEPPATH="/etc/step-ca"
STEPHOME="/etc/step"
export STEPPATH=$STEPPATH
echo "export STEPPATH=${STEPPATH}" >> /etc/profile
export STEPHOME=$STEPHOME
echo "export STEPHOME=${STEPHOME}" >> /etc/profile
sed -i '1i export STEPPATH=/etc/step-ca' /etc/profile
sed -i '1i export STEPHOME=/root/.step' /etc/profile
mkdir -p "$STEPHOME"
setcap CAP_NET_BIND_SERVICE=+eip $(which step-ca)
# Patch for making $STD happy (/usr/bin/step is a symlink to /usr/bin/step-cli)
STEPBIN="$(which step)"
rm -f "$STEPBIN"
cp -f "$(which step-cli)" "$STEPBIN"
$STD useradd --user-group --system --home $(step path) --shell /bin/false step
# Low port-binding capabilities (ports < 1024)
# - Default step-ca listener port: 443
setcap CAP_NET_BIND_SERVICE=+eip "$(which step-ca)"
# Service User used by systemd step-ca.service
$STD useradd --user-group --system --home "$(step path)" --shell /bin/false step
msg_ok "Installed step-ca and step-cli"
DomainName="$(hostname -d)"
PKIName="$(prompt_input "Enter PKIName" "MyHomePKI" 30)"
PKICountry="$(prompt_input "Enter PKICountry" "DE" 30)"
PKIOrganizationalUnit="$(prompt_input "Enter PKIOrganizationalUnit" "MyHomeLab" 30)"
PKIProvisioner="$(prompt_input "Enter PKIProvisioner" "pki@$DomainName" 30)"
AcmeProvisioner="$(prompt_input "Enter AcmeProvisioner" "acme@$DomainName" 30)"
X509MinDur="$(prompt_input "Enter X509MinDur" "48h" 30)"
@@ -45,11 +58,15 @@ X509MaxDur="$(prompt_input "Enter X509MaxDur" "87600h" 30)"
X509DefaultDur="$(prompt_input "Enter X509DefaultDur" "168h" 30)"
msg_info "Initializing step-ca"
# Initialize step-ca
DeploymentType="standalone"
FQDN="$(hostname -f)"
IP="${LOCAL_IP}"
LISTENER=":443"
LISTENER_INSECURE=":80"
# Set different signing CA and Provisioner Passwords
EncryptionPwdDir="$(step path)/encryption"
PwdFile="$EncryptionPwdDir/ca.pwd"
ProvisionerPwdFile="$EncryptionPwdDir/provisioner.pwd"
@@ -57,19 +74,208 @@ mkdir -p "$EncryptionPwdDir"
gpg -q --gen-random --armor 2 32 >"$PwdFile"
gpg -q --gen-random --armor 2 32 >"$ProvisionerPwdFile"
$STD step ca init --deployment-type="$DeploymentType" --ssh --name="$PKIName" --dns="$FQDN" --dns="$IP" --address="$LISTENER" --provisioner="$PKIProvisioner" --password-file="$PwdFile" --provisioner-password-file="$ProvisionerPwdFile"
# Used by systemd step-ca.service
ln -s "$PwdFile" "$(step path)/password.txt"
chown -R step:step $(step path)
chmod -R 700 $(step path)
$STD step ca provisioner add "$AcmeProvisioner" --type ACME --admin-name "$AcmeProvisioner"
$STD step ca provisioner update "$PKIProvisioner" --x509-min-dur="$X509MinDur" --x509-max-dur="$X509MaxDur" --x509-default-dur="$X509DefaultDur" --allow-renewal-after-expiry
$STD step ca provisioner update "$AcmeProvisioner" --x509-min-dur="$X509MinDur" --x509-max-dur="$X509MaxDur" --x509-default-dur="$X509DefaultDur" --allow-renewal-after-expiry
$STD step certificate install --all $(step path)/certs/root_ca.crt
# Usage of:
# - SSH feature of step-ca
# - BadgerDB (badgerv2) => Default DB backend of step-ca
# - badgerFileLoadingMode: FileIO (instead of MemoryMap) for LXC with low RAM
$STD step ca init \
--deployment-type="$DeploymentType" \
--ssh \
--name="$PKIName" \
--dns="$FQDN" \
--dns="$IP" \
--address="$LISTENER" \
--provisioner="$PKIProvisioner" \
--password-file="$PwdFile" \
--provisioner-password-file="$ProvisionerPwdFile"
# Define enhanced x509 CA and Certificate Templates
mkdir -p "$(step path)/templates/ca"
mkdir -p "$(step path)/templates/x509"
CARootTemplate="$(step path)/templates/ca/root.tpl"
CAIntermediateTemplate="$(step path)/templates/ca/intermediate.tpl"
X509LeafTemplate="$(step path)/templates/x509/leaf.tpl"
X509LeafTemplateData="$(step path)/templates/x509/leaf_data.tpl"
cat <<'EOF' >"$CARootTemplate"
{
"subject": {
"country": {{ toJson .Insecure.User.country }},
"organization": {{ toJson .Insecure.User.organization }},
"organizationalUnit": {{ toJson .Insecure.User.organizationalUnit }},
"commonName": {{ toJson .Subject.CommonName }}
},
"issuer": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 1
},
"issuingCertificateURL": [{{ toJson .Insecure.User.issuingCertificateURL }}],
"crlDistributionPoints": [{{ toJson .Insecure.User.crlDistributionPoints }}]
}
EOF
cat <<'EOF' >"$CAIntermediateTemplate"
{
"subject": {
"country": {{ toJson .Insecure.User.country }},
"organization": {{ toJson .Insecure.User.organization }},
"organizationalUnit": {{ toJson .Insecure.User.organizationalUnit }},
"commonName": {{ toJson .Subject.CommonName }}
},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
},
"issuingCertificateURL": [{{ toJson .Insecure.User.issuingCertificateURL }}],
"crlDistributionPoints": [{{ toJson .Insecure.User.crlDistributionPoints }}]
}
EOF
cat <<'EOF' >"$X509LeafTemplate"
{
"subject": {
{{- if .Insecure.User.Country }}
"country": {{ toJson .Insecure.User.country }},
{{- else }}
"country": {{ toJson .country }},
{{- end }}
{{- if .Insecure.User.organization }}
"organization": {{ toJson .Insecure.User.organization }},
{{- else }}
"organization": {{ toJson .organization }},
{{- end }}
{{- if .Insecure.User.organizationalUnit }}
"organizationalUnit": {{ toJson .Insecure.User.organizationalUnit }},
{{- else }}
"organizationalUnit": {{ toJson .organizationalUnit }},
{{- end }}
"commonName": {{ toJson .Subject.CommonName }}
},
"sans": {{ toJson .SANs }},
{{- if typeIs "*rsa.PublicKey" .Insecure.CR.PublicKey }}
"keyUsage": ["keyEncipherment", "digitalSignature"],
{{- else }}
"keyUsage": ["digitalSignature"],
{{- end }}
"extKeyUsage": ["serverAuth", "clientAuth"],
{{- if .Insecure.User.issuingCertificateURL }}
"issuingCertificateURL": [{{ toJson .Insecure.User.issuingCertificateURL }}],
{{- else }}
"issuingCertificateURL": [{{ toJson .issuingCertificateURL }}],
{{- end }}
{{- if .Insecure.User.crlDistributionPoints }}
"crlDistributionPoints": [{{ toJson .Insecure.User.crlDistributionPoints }}]
{{- else }}
"crlDistributionPoints": [{{ toJson .crlDistributionPoints }}]
{{- end }}
}
EOF
cat <<EOF >"$X509LeafTemplateData"
{
"country": "${PKICountry}",
"organization": "${PKIName}",
"organizationalUnit": "${PKIOrganizationalUnit}",
"issuingCertificateURL": ["https://${FQDN}${LISTENER}/intermediates.pem"],
"crlDistributionPoints": ["https://${FQDN}${LISTENER}/crl"]
}
EOF
# Configure CA Provisioners, DB and CRL settings
$STD step ca provisioner add "$AcmeProvisioner" \
--type ACME \
--admin-name "$AcmeProvisioner"
$STD step ca provisioner update "$PKIProvisioner" \
--x509-min-dur="$X509MinDur" \
--x509-max-dur="$X509MaxDur" \
--x509-default-dur="$X509DefaultDur" \
--x509-template="$X509LeafTemplate" \
--x509-template-data="$X509LeafTemplateData" \
--allow-renewal-after-expiry
$STD step ca provisioner update "$AcmeProvisioner" \
--x509-min-dur="$X509MinDur" \
--x509-max-dur="$X509MaxDur" \
--x509-default-dur="$X509DefaultDur" \
--x509-template="$X509LeafTemplate" \
--x509-template-data="$X509LeafTemplateData" \
--allow-renewal-after-expiry
CAConfig="$(step path)/config/ca.json"
jq --arg a "${PKICountry}" '.country = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq --arg a "${PKIName}" '.organization = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq --arg a "${PKIOrganizationalUnit}" '.organizationalUnit = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq --arg a "${PKIName} Online CA" '.commonName = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq '.db.badgerFileLoadingMode = "FileIO"' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq '.crl.enabled = true' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq '.crl.generateOnRevoke = true' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq '.crl.cacheDuration = "24h0m0s"' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq '.crl.renewPeriod = "16h0m0s"' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq --arg a "https://${FQDN}${LISTENER}/crl" '.crl.idpURL = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
jq --arg a "$LISTENER_INSECURE" '.insecureAddress = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}"
# Generate Root CA Certificate and Key
# - Validity: 219168h (~25 Years)
# - maxPathLen: 1 (Root -> Intermediate -> Leaf) => Only one Intermediate CA allowed below Root CA
# - Active revocation on Intermediate CA and Leaf Certificates by the usage of build-in Certificate Revocation List (CRL)
FLAGS=(--force
--template="${CARootTemplate}"
--not-after="219168h"
--password-file="${PwdFile}"
--set country="${PKICountry}"
--set organization="${PKIName}"
--set organizationalUnit="${PKIOrganizationalUnit}"
--set issuingCertificateURL="https://${FQDN}${LISTENER}/roots.pem"
--set crlDistributionPoints="https://${FQDN}${LISTENER}/crl")
$STD step certificate create "${PKIName} Root CA" \
"$(step path)/certs/root_ca.crt" \
"$(step path)/secrets/root_ca_key" \
"${FLAGS[@]}"
# Generate Intermediate CA Certificate Bundle and Key
# - Validity: 175368h (~20 Years)
# - maxPathLen: 0 (Root -> Intermediate -> Leaf) => Intermediate CA is only allowed to issue Leaf Certificates
# - Active revocation on Leaf Certificates by the usage of build-in Certificate Revocation List (CRL)
# - Bundle: Certificate Chain (including Root CA Certificate)
FLAGS=(--force
--template="${CAIntermediateTemplate}"
--ca="$(step path)/certs/root_ca.crt"
--ca-key="$(step path)/secrets/root_ca_key"
--not-after="175368h"
--ca-password-file="${PwdFile}"
--password-file="${PwdFile}"
--bundle
--set country="${PKICountry}"
--set organization="${PKIName}"
--set organizationalUnit="${PKIOrganizationalUnit}"
--set issuingCertificateURL="https://${FQDN}${LISTENER}/roots.pem"
--set crlDistributionPoints="https://${FQDN}${LISTENER}/crl")
$STD step certificate create "${PKIName} Intermediate CA" \
"$(step path)/certs/intermediate_ca.crt" \
"$(step path)/secrets/intermediate_ca_key" \
"${FLAGS[@]}"
# Install Root CA Certificate to System Trust Store
$STD step certificate install --all "$(step path)/certs/root_ca.crt"
$STD update-ca-certificates
chown -R step:step "$(step path)"
chmod -R 700 "$(step path)"
msg_ok "Initialized step-ca"
msg_info "Start step-ca as a Daemon"
# https://smallstep.com/docs/step-ca/certificate-authority-server-production/#running-step-ca-as-a-daemon
cat <<'EOF' >/etc/systemd/system/step-ca.service
[Unit]
Description=step-ca service
@@ -130,271 +336,6 @@ msg_ok "Started step-ca as a Daemon"
fetch_and_deploy_gh_release "step-badger" "lukasz-lobocki/step-badger" "prebuild" "latest" "/opt/step-badger" "step-badger_Linux_x86_64.tar.gz"
ln -s /opt/step-badger/step-badger /usr/local/bin/step-badger
msg_info "Install step-ca Admin script"
mkdir -p "$STEPHOME"
cat <<'ADDON_EOF' >"$STEPHOME/step-ca-admin.sh"
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Joerg Heinemann (heinemannj)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
function header_info() {
# Clear the terminal and print the step-ca-admin ASCII-art banner.
# The quoted heredoc delimiter keeps the backslashes/backticks literal.
clear
cat <<"EOF"
    __ ___ __ _
 _____/ /____ ____ _________ _ / | ____/ /___ ___ (_)___
/ ___/ __/ _ \/ __ \______/ ___/ __ `/ / /| |/ __ / __ `__ \/ / __ \
(__ ) /_/ __/ /_/ /_____/ /__/ /_/ / / ___ / /_/ / / / / / / / / / /
/____/\__/\___/ .___/ \___/\__,_/ /_/ |_\__,_/_/ /_/ /_/_/_/ /_/
/_/
EOF
}
function die() {
# Print a highlighted error message (surrounded by blank lines) and abort.
printf '%b\n' "\n${BL}[ERROR]${GN} ${RD}${1}${CL}\n"
exit
}
function success() {
# Print a highlighted success message followed by a blank line, then end
# the script.
printf '%b\n' "${BL}[SUCCESS]${GN} ${1}${CL}\n"
exit
}
function whiptail_menu() {
# Build MENU_ARRAY (TAG/ITEM/state triplets for a whiptail checklist) from
# newline-separated "TAG ITEM..." records in $1, tracking the widest item
# (plus padding) in MSG_MAX_LENGTH for later dialog sizing.
MENU_ARRAY=()
MSG_MAX_LENGTH=0
while read -r TAG ITEM; do
OFFSET=2
# Use arithmetic expansion: the old `MSG_MAX_LENGTH=${#ITEM}+OFFSET`
# stored the literal string "<len>+OFFSET" and only worked by accident
# through bash's recursive evaluation inside $(( )).
((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=$((${#ITEM} + OFFSET))
MENU_ARRAY+=("$TAG" "$ITEM " "OFF")
done < <(echo "$1")
}
function x509_list() {
# Refresh working copies of the step-ca database and CA certificates, then
# populate CERT_LIST with the x509 certificate table from step-badger.
# CERT_LIST stays empty when step-badger fails or reports nothing.
CERT_LIST=""
cp --recursive --force "$(step path)/db/"* "$STEPHOME/db-copy/"
cp --recursive --force "$(step path)/certs/"* "$STEPHOME/certs/ca/"
# Invoke step-badger once (the old code ran it twice: once for the test,
# once for the assignment) and quote the path argument.
local BADGER_OUTPUT
BADGER_OUTPUT=$(step-badger x509Certs "${STEPHOME}/db-copy" 2>/dev/null) || true
if [[ -n "${BADGER_OUTPUT}" ]]; then
CERT_LIST="${BADGER_OUTPUT}"
fi
}
function ssh_list() {
# Refresh working copies of the step-ca database and CA certificates, then
# populate CERT_LIST with the ssh certificate table from step-badger.
# Fixes the original "step-badgersshCerts" typo (missing space between the
# binary name and subcommand) that made the assignment always fail, so
# CERT_LIST ended up empty even when ssh certificates existed.
CERT_LIST=""
cp --recursive --force "$(step path)/db/"* "$STEPHOME/db-copy/"
cp --recursive --force "$(step path)/certs/"* "$STEPHOME/certs/ca/"
local BADGER_OUTPUT
BADGER_OUTPUT=$(step-badger sshCerts "${STEPHOME}/db-copy" 2>/dev/null) || true
if [[ -n "${BADGER_OUTPUT}" ]]; then
CERT_LIST="${BADGER_OUTPUT}"
fi
}
function x509_serial_to_cn() {
# Resolve the globally set SERIAL_NUMBER (loop variable of the callers) to
# its Common Name via the step-badger listing, then derive the expected
# certificate/key paths. Sets globals: CN, CRT, KEY.
# Dies when either file is missing on disk.
x509_list
CN="$(echo "${CERT_LIST}" | grep "${SERIAL_NUMBER}" | awk '{print $2}' | sed 's/CN=//g')"
CRT="$STEPHOME/certs/x509/$CN.crt"
KEY="$STEPHOME/certs/x509/$CN.key"
if ! [[ -f ${CRT} ]]; then
die "Certificate ${CRT} not found!"
elif ! [[ -f ${KEY} ]]; then
die "Private Key ${KEY} not found!"
fi
}
function x509_revoke() {
# Revoke every selected x509 certificate by serial number. Each revocation
# uses a one-time token minted from the JWK provisioner; the first failure
# aborts via die, otherwise `success` terminates the script at the end.
# shellcheck disable=SC2206
SERIAL_NUMBER_ARRAY=(${CERT_SERIAL_NUMBERS})
local revoke_token
for SERIAL_NUMBER in "${SERIAL_NUMBER_ARRAY[@]}"; do
printf '%b\n\n' "${BL}[Info]${GN} Revoke x509 Certificate with Serial Number ${BL}${SERIAL_NUMBER}${GN}:${CL}"
revoke_token=$(step ca token --provisioner="$PROVISIONER" --provisioner-password-file="$PROVISIONER_PASSWORD" --revoke "${SERIAL_NUMBER}")
step ca revoke --token "$revoke_token" "${SERIAL_NUMBER}" || die "Failed to revoke certificate!"
printf '\n'
done
success "Finished."
}
function x509_renew() {
# Renew each selected x509 certificate in place (--force overwrites the
# existing CRT/KEY files). NOTE: the loop variable SERIAL_NUMBER is
# intentionally global - x509_serial_to_cn reads it to resolve the CRT/KEY
# paths used below.
# shellcheck disable=SC2206
SERIAL_NUMBER_ARRAY=(${CERT_SERIAL_NUMBERS})
for SERIAL_NUMBER in "${SERIAL_NUMBER_ARRAY[@]}"; do
echo -e "${BL}[Info]${GN} Renew x509 Certificate with Serial Number ${BL}${SERIAL_NUMBER}${GN}:${CL}"
echo
x509_serial_to_cn
step ca renew "${CRT}" "${KEY}" --force || die "Failed to renew certificate!"
echo
done
success "Finished."
}
function x509_inspect() {
# Show each selected x509 certificate: parsed form (step inspect) plus the
# raw public and private key PEM files. The inspect output is captured once
# and reused for the serial-number sanity check, instead of invoking
# `step certificate inspect` a second time just for the grep (SC2143).
# shellcheck disable=SC2206
SERIAL_NUMBER_ARRAY=(${CERT_SERIAL_NUMBERS})
local INSPECT_OUTPUT
for SERIAL_NUMBER in "${SERIAL_NUMBER_ARRAY[@]}"; do
echo -e "${BL}[Info]${GN} Inspect x509 Certificate with Serial Number ${BL}${SERIAL_NUMBER}${GN}:${CL}\n"
x509_serial_to_cn
INSPECT_OUTPUT=$(step certificate inspect "${CRT}") || die "Failed to inspect certificate!"
printf '%s\n' "${INSPECT_OUTPUT}"
# Cross-check that the file on disk still matches the selected serial.
if ! grep -q "${SERIAL_NUMBER}" <<<"${INSPECT_OUTPUT}"; then
die "Serial Number ${SERIAL_NUMBER} mismatch!"
fi
echo -e "\n${BL}[Info]${GN} Public Key:${CL}\n"
cat "${CRT}"
echo -e "\n${BL}[Info]${GN} Private Key:${CL}\n"
cat "${KEY}"
echo
done
success "Finished."
}
function x509_request() {
# Interactively collect CSR details (FQDN, IP, hostname, SANs, validity)
# via whiptail, request a leaf certificate from the CA, and print the
# resulting certificate and private key. Dies when DNS resolution or
# issuance fails; `success` terminates the script at the end.
FQDN=""
SAN=""
while true; do
FQDN=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificate Signing Request (CSR)" --inputbox '\nFQDN (e.g. MyLXC.example.com)' 10 50 "$FQDN" 3>&1 1>&2 2>&3)
# Pre-fill IP and short hostname from DNS; the user may override both.
IP=$(dig +short "$FQDN")
if [[ -z "$IP" ]]; then
die "Resolution failed for $FQDN!"
fi
HOST=$(echo "$FQDN" | awk -F'.' '{print $1}')
IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificate Signing Request (CSR)" --inputbox '\nIP Address (e.g. x.x.x.x)' 10 50 "$IP" 3>&1 1>&2 2>&3)
HOST=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificate Signing Request (CSR)" --inputbox '\nHostname (e.g. MyHostName)' 10 50 "$HOST" 3>&1 1>&2 2>&3)
SAN=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificate Signing Request (CSR)" --inputbox '\nSubject Alternative Name(s) (SAN) (e.g. myapp-1.example.com, myapp-2.example.com)' 10 50 "$SAN" 3>&1 1>&2 2>&3)
VALID_TO=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificate Signing Request (CSR)" --inputbox '\nValidity (e.g. 2034-01-31T00:00:00Z)' 10 50 "2034-01-31T00:00:00Z" 3>&1 1>&2 2>&3)
# shellcheck disable=SC2034
if whiptail_yesno=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificate Signing Request (CSR)" --yesno "Continue with below?\n
FQDN: $FQDN
Hostname: $HOST
IP Address: $IP
Subject Alternative Name(s) (SAN): $SAN
Validity: $VALID_TO" --no-button "Change" --yes-button "Continue" 15 70 3>&1 1>&2 2>&3); then
break
fi
done
echo -e "${BL}[Info]${GN} Request x509 Certificate with subject ${BL}${FQDN}${GN}:${CL}"
echo
CRT="$STEPHOME/certs/x509/$FQDN.crt"
KEY="$STEPHOME/certs/x509/$FQDN.key"
SAN="$FQDN, $HOST, $IP, $SAN"
# Reset SAN_ARRAY explicitly: it is global, so stale entries from an
# earlier expansion would otherwise leak into this request, and expanding
# a never-declared array with "${SAN_ARRAY[@]}" errors under `set -u` on
# bash < 4.4.
SAN_ARRAY=()
IFS=', ' read -r -a array <<< "$SAN"
for element in "${array[@]}"
do
SAN_ARRAY+=(--san "$element")
done
step ca certificate "$FQDN" "$CRT" "$KEY" \
--provisioner="$PROVISIONER" \
--provisioner-password-file="$PROVISIONER_PASSWORD" \
--not-after="$VALID_TO" \
"${SAN_ARRAY[@]}" \
|| die "Failed to request certificate!"
echo -e "\n${BL}[Info]${GN} Inspect Certificate:${CL}\n"
step certificate inspect "${CRT}" || die "Failed to inspect certificate!"
echo -e "\n${BL}[Info]${GN} Public Key:${CL}\n"
cat "${CRT}"
echo -e "\n${BL}[Info]${GN} Private Key:${CL}\n"
cat "${KEY}"
echo
success "Finished."
}
# Strict mode: abort on errors and unset variables, fail pipelines on any
# stage, and (-E) propagate the ERR trap into functions and subshells.
set -eEuo pipefail
# ANSI color codes used by the status helpers above. The $(echo "...") form
# keeps the escape sequences as literal strings; they are interpreted later
# by `echo -e` / `printf %b` (hence the SC2116/SC2028 suppressions).
# shellcheck disable=SC2034
# shellcheck disable=SC2116
# shellcheck disable=SC2028
YW=$(echo "\033[33m")
# shellcheck disable=SC2116
# shellcheck disable=SC2028
BL=$(echo "\033[36m")
# shellcheck disable=SC2116
# shellcheck disable=SC2028
RD=$(echo "\033[01;31m")
# shellcheck disable=SC2034
CM='\xE2\x9C\x94\033'
# shellcheck disable=SC2116
# shellcheck disable=SC2028
GN=$(echo "\033[1;92m")
# shellcheck disable=SC2116
# shellcheck disable=SC2028
CL=$(echo "\033[m")
# Telemetry (best-effort: silently skipped when offline or when api.func
# does not define init_tool_telemetry)
# shellcheck disable=SC1090
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true
declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "step-ca-admin" "step-ca"
header_info
# Working directories: a scratch copy of the CA database for step-badger
# plus archive folders for CA, ssh and x509 certificate material.
mkdir --parents "$STEPHOME/db-copy/"
mkdir --parents "$STEPHOME/certs/ca/_archive/"
mkdir --parents "$STEPHOME/certs/ssh/_archive/"
mkdir --parents "$STEPHOME/certs/x509/_archive/"
# Pick the JWK provisioner name from the CA configuration. `-r` emits the
# raw string, which removes the need to strip surrounding JSON quotes
# manually, and `provisioners[]` (without the extra dot) is the iterator
# form accepted by all jq versions - `.provisioners.[]` is a syntax error
# on jq releases before 1.7.
PROVISIONER=$(jq -r '.authority.provisioners[] | select(.type=="JWK") | .name' "$(step path)"/config/ca.json)
# Password file used to decrypt the provisioner key when minting tokens.
PROVISIONER_PASSWORD=$(step path)/encryption/provisioner.pwd
# Entry confirmation - under `set -e`, choosing "No" (non-zero exit) aborts.
whiptail --backtitle "Proxmox VE Helper Scripts" --title "step-ca Admin" --yesno "This will maintain step-ca issued x509 and ssh Certificates. Proceed?" 10 58
# Certificate type selection (radiolist => exactly one choice).
MENU_ARRAY=("x509" "Maintain x509 Certificates." "ON")
MENU_ARRAY+=("ssh" "Maintain ssh Certificates." "OFF")
CERT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "step-ca Admin" --radiolist "\nSelect Certificate Type:" 16 48 6 "${MENU_ARRAY[@]}" 3>&1 1>&2 2>&3 | tr -d '"')
[[ -z ${CERT_TYPE} ]] && die "No Certificate Type selected!"
case ${CERT_TYPE} in
("x509")
# Drop step-badger's header row and reshape each record into
# "serial CN|...|...|..." pairs for the whiptail checklist.
x509_list
CERT_LIST=$(echo "$CERT_LIST" | awk 'NR>1 {print $1 " " $2 "|" $3 "|" $4 "|" $5}')
if [[ $CERT_LIST ]]; then
whiptail_menu "$CERT_LIST"
else
MENU_ARRAY=()
MSG_MAX_LENGTH=2
fi
MENU_ARRAY+=("" "Create a new Certificate" "OFF")
CERT_SERIAL_NUMBERS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Certificates on $(hostname)" --checklist "\nSelect Certificate(s) to maintain:\n" 16 $((MSG_MAX_LENGTH + 55)) 6 "${MENU_ARRAY[@]}" 3>&1 1>&2 2>&3 | tr -d '"')
# Empty selection corresponds to "Create a new Certificate";
# x509_request terminates the script itself.
[[ -z ${CERT_SERIAL_NUMBERS} ]] && x509_request
MENU_ARRAY=("Renew" "Renew x509 Certificates." "ON")
MENU_ARRAY+=("Revoke" "Revoke x509 Certificates." "OFF")
MENU_ARRAY+=("Inspect" "Inspect x509 Certificates." "OFF")
CERT_MAINTENANCE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "step-ca Admin" --radiolist "\nSelect Maintenance Type:" 16 48 6 "${MENU_ARRAY[@]}" 3>&1 1>&2 2>&3 | tr -d '"')
# Each maintenance function exits via success/die, so control never
# returns here after a successful dispatch.
case ${CERT_MAINTENANCE} in
("Renew")
x509_renew "${CERT_SERIAL_NUMBERS[@]}"
;;
("Revoke")
x509_revoke "${CERT_SERIAL_NUMBERS[@]}"
;;
("Inspect")
x509_inspect "${CERT_SERIAL_NUMBERS[@]}"
;;
*)
die "Unsupported CERT_MAINTENANCE Option!"
;;
esac
;;
("ssh")
die "Maintain ssh Certificates - To be implemented in future"
;;
*)
die "Unsupported CERT_TYPE Option!"
;;
esac
ADDON_EOF
chmod 700 "$STEPHOME/step-ca-admin.sh"
msg_ok "Installed step-ca Admin script"
motd_ssh
customize
cleanup_lxc

94
install/teable-install.sh Normal file
View File

@@ -0,0 +1,94 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/teableio/teable
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
python3 \
git
msg_ok "Installed Dependencies"
# Runtimes and database: Node 24 + pnpm, PostgreSQL 16 with a dedicated
# "teable" database/user (setup_postgresql_db exports PG_DB_PASS).
NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs
PG_VERSION="16" setup_postgresql
PG_DB_NAME="teable" PG_DB_USER="teable" setup_postgresql_db
fetch_and_deploy_gh_release "teable" "teableio/teable" "tarball"
msg_info "Setting up Teable"
cd /opt/teable
# fetch_and_deploy_gh_release records the deployed version in ~/.teable.
TEABLE_VERSION=$(cat ~/.teable)
echo "NEXT_PUBLIC_BUILD_VERSION=\"${TEABLE_VERSION}\"" >>apps/nextjs-app/.env
# Disable git hooks and raise the Node heap for the monorepo build.
export HUSKY=0
export NODE_OPTIONS="--max-old-space-size=8192"
$STD pnpm install --frozen-lockfile
$STD pnpm -F @teable/db-main-prisma prisma-generate --schema ./prisma/postgres/schema.prisma
msg_ok "Set up Teable"
msg_info "Building Teable"
NODE_ENV=production NEXT_BUILD_ENV_TYPECHECK=false \
$STD pnpm -r --filter '!playground' run build
msg_ok "Built Teable"
msg_info "Running Database Migrations"
PRISMA_DATABASE_URL="postgresql://teable:${PG_DB_PASS}@localhost:5432/teable?schema=public" \
$STD pnpm -F @teable/db-main-prisma prisma-migrate deploy --schema ./prisma/postgres/schema.prisma
msg_ok "Ran Database Migrations"
msg_info "Configuring Teable"
mkdir -p /opt/teable/.assets /opt/teable/.temporary
SECRET_KEY=$(openssl rand -base64 32)
cat <<EOF >/opt/teable/.env
PRISMA_DATABASE_URL=postgresql://teable:${PG_DB_PASS}@localhost:5432/teable?schema=public&statement_cache_size=1
PUBLIC_ORIGIN=http://${LOCAL_IP}:3000
SECRET_KEY=${SECRET_KEY}
PORT=3000
NODE_ENV=production
NEXT_TELEMETRY_DISABLED=1
BACKEND_CACHE_PROVIDER=sqlite
BACKEND_CACHE_SQLITE_URI=sqlite:///opt/teable/.assets/.cache.db
NEXTJS_DIR=apps/nextjs-app
EOF
# /app mirrors the upstream Docker layout; point /opt/teable/static at
# whichever static directory the built backend actually ships.
ln -sf /opt/teable /app
rm -rf /opt/teable/static
if [ -d "/opt/teable/apps/nestjs-backend/static/static" ]; then
ln -sf /opt/teable/apps/nestjs-backend/static/static /opt/teable/static
else
ln -sf /opt/teable/apps/nestjs-backend/static /opt/teable/static
fi
msg_ok "Configured Teable"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/teable.service
[Unit]
Description=Teable
After=network.target postgresql.service
[Service]
Type=simple
WorkingDirectory=/opt/teable
EnvironmentFile=/opt/teable/.env
ExecStart=/usr/bin/node apps/nestjs-backend/dist/index.js
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now teable
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -20,9 +20,7 @@ $STD apt install -y \
vlc
msg_ok "Installed Dependencies"
fetch_and_deploy_gh_release "threadfin" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64"
mv /root/.threadfin /root/.threadfin_version
mkdir -p /root/.threadfin
fetch_and_deploy_gh_release "threadfin-app" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/threadfin.service

View File

@@ -0,0 +1,294 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/tubearchivist/tubearchivist
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
git \
nginx \
redis-server \
atomicparsley \
python3-dev \
libldap2-dev \
libsasl2-dev \
libssl-dev \
sqlite3 \
ffmpeg
msg_ok "Installed Dependencies"
# Runtimes: Python 3.13 via uv, Node 24 for the frontend build, and the
# Deno binary installed to /usr/local/bin.
UV_PYTHON="3.13" setup_uv
NODE_VERSION="24" setup_nodejs
fetch_and_deploy_gh_release "deno" "denoland/deno" "prebuild" "latest" "/usr/local/bin" "deno-x86_64-unknown-linux-gnu.zip"
msg_info "Installing ElasticSearch"
setup_deb822_repo \
"elastic-8.x" \
"https://artifacts.elastic.co/GPG-KEY-elasticsearch" \
"https://artifacts.elastic.co/packages/8.x/apt" \
"stable" \
"main"
ES_JAVA_OPTS="-Xms1g -Xmx1g" $STD apt install -y elasticsearch
msg_ok "Installed ElasticSearch"
msg_info "Configuring ElasticSearch"
# Single-node setup bound to loopback only; xpack security/TLS is disabled,
# matching the localhost-only network.host binding below.
cat <<EOF >/etc/elasticsearch/elasticsearch.yml
cluster.name: tubearchivist
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
path.repo: ["/var/lib/elasticsearch/snapshot"]
network.host: 127.0.0.1
xpack.security.enabled: false
xpack.security.transport.ssl.enabled: false
xpack.security.http.ssl.enabled: false
EOF
mkdir -p /var/lib/elasticsearch/snapshot
chown -R elasticsearch:elasticsearch /var/lib/elasticsearch/snapshot
# Pin the JVM heap to 1 GiB for the container environment.
cat <<EOF >/etc/elasticsearch/jvm.options.d/heap.options
-Xms1g
-Xmx1g
EOF
# Apply the mmap-count requirement now (best-effort: may be denied in an
# unprivileged container) and persist it across reboots.
sysctl -w vm.max_map_count=262144 2>/dev/null || true
cat <<EOF >/etc/sysctl.d/99-elasticsearch.conf
vm.max_map_count=262144
EOF
systemctl enable -q --now elasticsearch
msg_ok "Configured ElasticSearch"
fetch_and_deploy_gh_release "tubearchivist" "tubearchivist/tubearchivist" "tarball"
msg_info "Building Frontend"
cd /opt/tubearchivist/frontend
$STD npm install
$STD npm run build:deploy
# Ship the built frontend from the backend's static directory.
mkdir -p /opt/tubearchivist/backend/static
cp -r /opt/tubearchivist/frontend/dist/* /opt/tubearchivist/backend/static/
msg_ok "Built Frontend"
msg_info "Setting up Tube Archivist"
cp /opt/tubearchivist/docker_assets/backend_start.py /opt/tubearchivist/backend/
$STD uv venv /opt/tubearchivist/.venv
$STD uv pip install --python /opt/tubearchivist/.venv/bin/python -r /opt/tubearchivist/backend/requirements.txt
# Optional yt-dlp plugins, installed to the directory referenced by
# YTDLP_PLUGIN_DIRS in the .env below.
if [[ -f /opt/tubearchivist/backend/requirements.plugins.txt ]]; then
mkdir -p /opt/yt_plugins/bgutil
$STD uv pip install --python /opt/tubearchivist/.venv/bin/python --target /opt/yt_plugins/bgutil -r /opt/tubearchivist/backend/requirements.plugins.txt
fi
# Random 13-character alphanumeric credentials for the app and ES.
TA_PASSWORD=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
ES_PASSWORD=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
mkdir -p /opt/tubearchivist/{cache,media}
# Recreate the Docker-style /cache and /youtube paths used in the .env.
ln -sf /opt/tubearchivist/cache /cache
ln -sf /opt/tubearchivist/media /youtube
cat <<EOF >/opt/tubearchivist/.env
TA_HOST=http://${LOCAL_IP}:8000
TA_USERNAME=admin
TA_PASSWORD=${TA_PASSWORD}
TA_BACKEND_PORT=8080
TA_APP_DIR=/opt/tubearchivist/backend
TA_CACHE_DIR=/cache
TA_MEDIA_DIR=/youtube
ES_SNAPSHOT_DIR=/var/lib/elasticsearch/snapshot
ELASTIC_PASSWORD=${ES_PASSWORD}
REDIS_CON=redis://localhost:6379
ES_URL=http://localhost:9200
TZ=UTC
PYTHONUNBUFFERED=1
YTDLP_PLUGIN_DIRS=/opt/yt_plugins
EOF
{
echo "Tube Archivist Credentials"
echo "=========================="
echo "Username: admin"
echo "Password: ${TA_PASSWORD}"
echo "Elasticsearch Password: ${ES_PASSWORD}"
} >~/tubearchivist.creds
systemctl enable -q --now redis-server
msg_ok "Set up Tube Archivist"
msg_info "Configuring Nginx"
# NOTE(review): nginx workers are switched to root, presumably so the
# auth-gated alias locations below can read the /cache and /youtube
# symlinks created earlier - confirm before hardening.
sed -i 's/^user www-data;$/user root;/' /etc/nginx/nginx.conf
# Reverse proxy on :8000: /_auth delegates authentication to the backend's
# ping endpoint, media/cache aliases are gated by auth_request, /api and
# /admin proxy to the backend on :8080, everything else serves the SPA.
cat <<'EOF' >/etc/nginx/sites-available/default
server {
listen 8000;
location = /_auth {
internal;
proxy_pass http://localhost:8080/api/ping/;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header Host $http_host;
proxy_set_header Cookie $http_cookie;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /cache/videos/ {
auth_request /_auth;
alias /cache/videos/;
}
location /cache/channels/ {
auth_request /_auth;
alias /cache/channels/;
}
location /cache/playlists/ {
auth_request /_auth;
alias /cache/playlists/;
}
location /media/ {
auth_request /_auth;
alias /youtube/;
types {
text/vtt vtt;
}
}
location /youtube/ {
auth_request /_auth;
alias /youtube/;
types {
video/mp4 mp4;
}
}
location /api {
include proxy_params;
proxy_pass http://localhost:8080;
}
location /admin {
include proxy_params;
proxy_pass http://localhost:8080;
}
location /static/ {
alias /opt/tubearchivist/backend/staticfiles/;
}
root /opt/tubearchivist/backend/static;
index index.html;
location ~* ^/(?!static/|cache/).*\.(?:css|js|png|jpg|jpeg|gif|ico|svg|woff2?)$ {
try_files $uri $uri/ /index.html =404;
}
location = /index.html {
add_header Cache-Control "no-store, no-cache, must-revalidate";
add_header Pragma "no-cache";
expires 0;
}
location / {
add_header Cache-Control "no-store, no-cache, must-revalidate";
add_header Pragma "no-cache";
expires 0;
try_files $uri $uri/ /index.html =404;
}
}
EOF
systemctl enable -q nginx
systemctl restart nginx
msg_ok "Configured Nginx"
msg_info "Creating Services"
# run.sh: waits (up to ~60s) for ElasticSearch, runs Django migrations,
# static collection and the TA startup checks, then execs the backend.
cat <<'RUNEOF' >/opt/tubearchivist/backend/run.sh
#!/bin/bash
set -e
cd /opt/tubearchivist/backend
set -a
source /opt/tubearchivist/.env
set +a
PYTHON=/opt/tubearchivist/.venv/bin/python
echo "Waiting for ElasticSearch..."
for i in $(seq 1 30); do
if curl -sf http://localhost:9200/_cluster/health >/dev/null 2>&1; then
break
fi
sleep 2
done
$PYTHON manage.py migrate
$PYTHON manage.py collectstatic --noinput -c
$PYTHON manage.py ta_envcheck
$PYTHON manage.py ta_connection
$PYTHON manage.py ta_startup
exec $PYTHON backend_start.py
RUNEOF
chmod +x /opt/tubearchivist/backend/run.sh
ln -sf /opt/tubearchivist/.env /opt/tubearchivist/backend/.env
cat <<EOF >/etc/systemd/system/tubearchivist.service
[Unit]
Description=Tube Archivist Backend
After=network.target elasticsearch.service redis-server.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/tubearchivist/backend
EnvironmentFile=/opt/tubearchivist/.env
Environment=PATH=/opt/tubearchivist/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=/opt/tubearchivist/backend/run.sh
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/tubearchivist-celery.service
[Unit]
Description=Tube Archivist Celery Worker
After=tubearchivist.service redis-server.service elasticsearch.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/tubearchivist/backend
EnvironmentFile=/opt/tubearchivist/.env
Environment=PATH=/opt/tubearchivist/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=/opt/tubearchivist/.venv/bin/celery -A task worker --loglevel=error --concurrency=4 --max-tasks-per-child=5 --max-memory-per-child=150000
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Beat blocks in ExecStartPre until the backend's migrations have created
# the django_celery_beat tables; the \$ escapes keep that loop literal in
# this otherwise-expanding heredoc.
cat <<EOF >/etc/systemd/system/tubearchivist-beat.service
[Unit]
Description=Tube Archivist Celery Beat
After=tubearchivist.service redis-server.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/tubearchivist/backend
EnvironmentFile=/opt/tubearchivist/.env
Environment=PATH=/opt/tubearchivist/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStartPre=/bin/bash -c 'for i in \$(seq 1 60); do sqlite3 /cache/db.sqlite3 "SELECT 1 FROM django_celery_beat_crontabschedule LIMIT 1" 2>/dev/null && exit 0; sleep 2; done; exit 1'
ExecStart=/opt/tubearchivist/.venv/bin/celery -A task beat --loglevel=error --scheduler django_celery_beat.schedulers:DatabaseScheduler
Restart=always
RestartSec=5
RuntimeMaxSec=3600
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now tubearchivist tubearchivist-celery tubearchivist-beat
msg_ok "Created Services"
motd_ssh
customize
cleanup_lxc

View File

@@ -513,7 +513,7 @@ validate_bridge() {
[[ -z "$bridge" ]] && return 1
# Check if bridge interface exists
if ! ip link show "$bridge" &>/dev/null; then
if ! ip link show dev "$bridge" &>/dev/null; then
return 1
fi
@@ -1062,7 +1062,6 @@ load_vars_file() {
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain
var_post_install
)
# Whitelist check helper
@@ -1280,7 +1279,6 @@ default_var_settings() {
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
var_post_install
)
# Snapshot: environment variables (highest precedence)
@@ -1376,11 +1374,6 @@ var_verbose=no
# GitHub Personal Access Token (optional avoids API rate limits during installs)
# Create at https://github.com/settings/tokens read-only public access is sufficient
# var_github_token=ghp_your_token_here
# Optional post-install script (host-side path to a *.sh on the Proxmox host)
# Runs ON THE HOST after the container is fully provisioned.
# Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG
# var_post_install=/opt/post-install/myhook.sh
EOF
# Now choose storages (always prompt unless just one exists)
@@ -1459,7 +1452,6 @@ if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu
var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged
var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain
var_post_install
)
fi
@@ -1672,7 +1664,6 @@ _build_current_app_vars_tmp() {
[ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
[ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
[ -n "${var_post_install:-}" ] && echo "var_post_install=$(_sanitize_value "${var_post_install}")"
} >"$tmpf"
echo "$tmpf"
@@ -1817,7 +1808,7 @@ advanced_settings() {
TAGS="community-script${var_tags:+;${var_tags}}"
fi
local STEP=1
local MAX_STEP=29
local MAX_STEP=28
# Store values for back navigation - inherit from var_* app defaults
local _ct_type="${var_unprivileged:-1}"
@@ -1851,7 +1842,6 @@ advanced_settings() {
local _enable_mknod="${var_mknod:-0}"
local _mount_fs="${var_mount_fs:-}"
local _protect_ct="${var_protection:-no}"
local _post_install="${var_post_install:-}"
# Detect host timezone for default (if not set via var_timezone)
local _host_timezone=""
@@ -2709,61 +2699,9 @@ advanced_settings() {
;;
# ═══════════════════════════════════════════════════════════════════════════
# STEP 28: Optional host-side post-install hook (path on the Proxmox HOST)
# STEP 28: Verbose Mode & Confirmation
# ═══════════════════════════════════════════════════════════════════════════
28)
local _hook_prompt="Optional: absolute path to a *.sh file ON THE PROXMOX HOST.
It runs as root on the HOST (NOT in the LXC) after the container
is fully provisioned and started.
Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG.
Leave empty to skip."
while true; do
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "POST-INSTALL HOOK (HOST)" \
--ok-button "Next" --cancel-button "Back" \
--inputbox "$_hook_prompt" 16 70 "${_post_install}" \
3>&1 1>&2 2>&3); then
# Normalize: strip surrounding whitespace
result="$(printf '%s' "$result" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
if [[ -z "$result" ]]; then
_post_install=""
((STEP++))
break
fi
# Reject obvious shell-meta sneaking through
if [[ "$result" == *';'* || "$result" == *'$('* || "$result" == *'`'* || "$result" == *'&&'* || "$result" == *'||'* ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \
--msgbox "Path contains shell metacharacters. Please provide a plain absolute file path." 10 70
continue
fi
if [[ "$result" != /* ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \
--msgbox "Path must be absolute (start with /).\n\nGot: $result" 10 70
continue
fi
if [[ ! -f "$result" ]]; then
if ! whiptail --backtitle "Proxmox VE Helper Scripts" --title "FILE NOT FOUND" \
--yesno "File does not exist on host:\n\n$result\n\nKeep this path anyway?" 12 70; then
continue
fi
fi
_post_install="$result"
((STEP++))
break
else
((STEP--))
break
fi
done
;;
# ═══════════════════════════════════════════════════════════════════════════
# STEP 29: Verbose Mode & Confirmation
# ═══════════════════════════════════════════════════════════════════════════
29)
local verbose_default_flag="--defaultno"
[[ "$_verbose" == "yes" ]] && verbose_default_flag=""
@@ -2792,11 +2730,6 @@ Leave empty to skip."
local apt_display="${_apt_cacher:-no}"
[[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip"
local post_install_display="${_post_install:-(none)}"
local post_install_warn=""
[[ -n "$_post_install" ]] && post_install_warn="
⚠ Hook runs as root on Proxmox HOST (not in LXC)"
local summary="Container Type: $ct_type_desc
Container ID: $_ct_id
Hostname: $_hostname
@@ -2820,8 +2753,7 @@ Features:
Advanced:
Timezone: $tz_display
APT Cacher: $apt_display
Verbose: $_verbose
Post-Install Script: ${post_install_display}${post_install_warn}"
Verbose: $_verbose"
if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "CONFIRM SETTINGS" \
@@ -2864,7 +2796,6 @@ Advanced:
APT_CACHER="$_apt_cacher"
APT_CACHER_IP="$_apt_cacher_ip"
VERBOSE="$_verbose"
var_post_install="$_post_install"
# Update var_* based on user choice (for functions that check these)
var_gpu="$_enable_gpu"
@@ -3573,6 +3504,52 @@ msg_menu() {
return 0
}
# ------------------------------------------------------------------------------
# run_addon_updates()
#
# - Scans /usr/local/bin/update_* for addon update scripts installed alongside
# the main application (e.g. by tools/addon/*.sh)
# - For each found addon, prompts the user (60s timeout, default no) whether
# it should be updated as well
# - Skipped entirely when PHS_SILENT=1 to keep unattended updates predictable
# ------------------------------------------------------------------------------
run_addon_updates() {
# Offer to run any addon update scripts (/usr/local/bin/update_*) installed
# alongside the main application. Each addon is confirmed individually with
# a 60-second timeout (default: skip); the whole step is suppressed when
# PHS_SILENT=1 so unattended updates stay predictable.
local hook_scripts=()
shopt -s nullglob
hook_scripts=(/usr/local/bin/update_*)
shopt -u nullglob
((${#hook_scripts[@]} == 0)) && return 0
if [[ "${PHS_SILENT:-0}" == "1" ]]; then
msg_info "Detected ${#hook_scripts[@]} addon update script(s) - skipping (PHS_SILENT)"
return 0
fi
local script addon_name reply
echo
echo -e "${INFO}${YW} Detected installed addon update script(s):${CL}"
for script in "${hook_scripts[@]}"; do
echo -e "${TAB}- ${script##*/update_}"
done
echo
for script in "${hook_scripts[@]}"; do
addon_name="${script##*/update_}"
printf 'Do you also want to update addon "%s"? (y/N) [60s]: ' "$addon_name"
reply=""
read -r -t 60 reply || echo
case "${reply,,}" in
y | yes)
bash "$script" || msg_warn "Addon update for $addon_name failed (rc=$?)"
;;
*)
msg_info "Skipped addon: $addon_name"
;;
esac
done
}
# ------------------------------------------------------------------------------
# start()
#
@@ -3592,6 +3569,7 @@ start() {
ensure_profile_loaded
get_lxc_ip
update_script
run_addon_updates
update_motd_ip
cleanup_lxc
else
@@ -3620,6 +3598,7 @@ start() {
ensure_profile_loaded
get_lxc_ip
update_script
run_addon_updates
update_motd_ip
cleanup_lxc
fi
@@ -6374,40 +6353,6 @@ EOF
systemctl start ping-instances.service
fi
# Optional host-side post-install hook
# Path comes from var_post_install (default.vars / app.vars / advanced settings).
# Runs ON THE PROXMOX HOST after the container is up and configured.
# Exposed env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG.
# Output (stdout/stderr) is captured to /var/log/community-scripts/post-install-<CTID>.log
if [[ -n "${var_post_install:-}" ]]; then
local _hook_log_dir="/var/log/community-scripts"
local _hook_log="${_hook_log_dir}/post-install-${CTID}.log"
mkdir -p "$_hook_log_dir" 2>/dev/null || true
if [[ ! -f "${var_post_install}" ]]; then
msg_error "Post-install hook not found on host: ${var_post_install}"
whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "POST-INSTALL HOOK FAILED" \
--msgbox "The configured post-install hook was not found on the Proxmox host:\n\n${var_post_install}\n\nThe LXC was created successfully, but the hook did NOT run." 14 72 || true
else
msg_info "Running post-install hook: ${var_post_install}"
local _hook_rc=0
APP="$APP" NSAPP="${NSAPP:-}" CTID="$CTID" IP="$IP" HN="${HN:-}" \
STORAGE="${STORAGE:-}" BRG="${BRG:-}" \
bash "${var_post_install}" >"${_hook_log}" 2>&1 || _hook_rc=$?
if [[ $_hook_rc -eq 0 ]]; then
msg_ok "Post-install hook completed (log: ${_hook_log})"
else
msg_error "Post-install hook failed (rc=${_hook_rc}) see ${_hook_log}"
local _hook_tail=""
_hook_tail="$(tail -n 15 "${_hook_log}" 2>/dev/null || true)"
whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "POST-INSTALL HOOK FAILED" \
--msgbox "Hook exited with code ${_hook_rc}.\n\nScript: ${var_post_install}\nLog: ${_hook_log}\n\n--- Last log lines ---\n${_hook_tail}\n\nThe LXC itself was created successfully." 22 78 || true
fi
fi
fi
INSTALL_COMPLETE=true
post_update_to_api "done" "none"
}

View File

@@ -57,7 +57,9 @@ start_routines() {
yes)
msg_info "Switching to Debian 13 (Trixie) Sources"
rm -f /etc/apt/sources.list.d/*.list
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
if [ -f /etc/apt/sources.list ]; then
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list
fi
cat >/etc/apt/sources.list.d/debian.sources <<EOF
Types: deb
URIs: http://deb.debian.org/debian

View File

@@ -1,436 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# Community-Scripts ProxmoxVE — Post-Install Hook: Example Library
# ----------------------------------------------------------------------------
# This file is NOT meant to be executed as-is.
# It is a collection of complete, copy-pasteable example hooks for the
# optional `var_post_install` feature in build.func.
#
# HOW IT WORKS
# ------------
# In the ct/*.sh CT scripts (or via Advanced Settings → Step 28) you can
# point `var_post_install` to an absolute path on the Proxmox HOST, e.g.:
#
# # in /root/.community-scripts/default.vars
# var_post_install=/opt/community-scripts/hooks/notify.sh
#
# # OR per-app, in app.vars
# var_post_install=/opt/community-scripts/hooks/vaultwarden-postprovision.sh
#
# # OR interactively in the Advanced Settings whiptail (Step 28).
#
# The hook runs ON THE PROXMOX HOST (NOT inside the LXC) as root,
# AFTER the container is fully provisioned, started and the description
# is set. stdout/stderr is captured to:
#
# /var/log/community-scripts/post-install-<CTID>.log
#
# AVAILABLE ENV VARIABLES
# -----------------------
# APP - Pretty name (e.g. "Vaultwarden")
# NSAPP - Slug / lowercase (e.g. "vaultwarden")
# CTID - Numeric container ID (e.g. "103")
# IP - IPv4 address of the LXC (e.g. "192.168.1.50")
# HN - Hostname (e.g. "vaultwarden")
# STORAGE - Storage where the rootfs lives (e.g. "local-lvm")
# BRG - Bridge (e.g. "vmbr0")
#
# GENERAL TIPS
# ------------
# - Use `set -euo pipefail` so failures actually surface.
# - Use `|| true` on best-effort steps you do not want to abort the hook.
# - The file just needs to be a valid script. `+x` is optional — it is
# invoked via `bash <path>`. Shebang is honored only if you call it
# yourself; otherwise the shebang line is purely cosmetic.
# - If the hook exits non-zero, the user gets a whiptail popup with the
# last 15 log lines. The LXC creation itself is NOT rolled back.
# - Keep hooks idempotent — they may be re-run if you recreate a CT.
#
# HOW TO USE THIS FILE
# --------------------
# 1. Copy ONE example block (between the BEGIN/END markers) into a new
# file on the Proxmox host, e.g. /opt/community-scripts/hooks/notify.sh
# 2. chmod +x /opt/community-scripts/hooks/notify.sh (optional)
# 3. Set var_post_install in default.vars / app.vars or pick the path
# in Advanced Settings.
# ============================================================================
# ============================================================================
# ▼▼▼ EXAMPLE 1 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : minimal-logger.sh
# Purpose : Append every newly created LXC to a single CSV-ish log.
# Difficulty : ⭐ Beginner
# Side effects: Writes to /var/log/community-scripts/created-lxcs.log
# Use case : You just want a paper trail of "what got created when".
# ============================================================================
#!/usr/bin/env bash
set -euo pipefail

# Append one CSV-ish row per created LXC to a single host-wide ledger.
ledger_dir="/var/log/community-scripts"
ledger="${ledger_dir}/created-lxcs.log"

mkdir -p "$ledger_dir"

# Write the header the first time the ledger is used (file missing or empty).
[[ -s "$ledger" ]] || echo "timestamp;ctid;app;hostname;ip;bridge;storage" >"$ledger"

# Build the row from the env vars exported by build.func, then append it.
row="$(date -Iseconds);${CTID};${APP};${HN};${IP};${BRG};${STORAGE}"
printf '%s\n' "$row" >>"$ledger"

echo "Logged ${APP} (CTID=${CTID}) to ${ledger}"
# ▲▲▲ EXAMPLE 1 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 2 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : discord-gotify-notify.sh
# Purpose : Send a rich Discord embed AND a Gotify push notification
# whenever a new LXC is provisioned.
# Difficulty : ⭐⭐ Intermediate
# Requires : curl on the host (default), reachable webhook URLs.
# Side effects: Outbound HTTPS to Discord + your Gotify server.
# ============================================================================
#!/usr/bin/env bash
set -euo pipefail

# --- CONFIG (edit me) -------------------------------------------------------
DISCORD_WEBHOOK="https://discord.com/api/webhooks/XXXXXXXX/YYYYYYYY"
GOTIFY_URL="https://gotify.example.com"
GOTIFY_TOKEN="AbCdEfGhIjKlMnO"
GOTIFY_PRIORITY=5
# ----------------------------------------------------------------------------

# Node name and timestamp give the notifications some cluster context.
NODE="$(hostname -s)"
TS="$(date -Iseconds)"

# --- Discord embed ----------------------------------------------------------
# NB: `read -d ''` returns non-zero at EOF, so `|| true` keeps set -e happy.
read -r -d '' discord_body <<JSON || true
{
"username": "Proxmox - ${NODE}",
"avatar_url": "https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png",
"embeds": [{
"title": "✅ ${APP} LXC created",
"description": "A new community-script LXC has been provisioned on **${NODE}**.",
"color": 3066993,
"timestamp": "${TS}",
"fields": [
{"name": "CTID", "value": "${CTID}", "inline": true},
{"name": "Hostname", "value": "${HN}", "inline": true},
{"name": "App", "value": "${APP}", "inline": true},
{"name": "IP", "value": "${IP}", "inline": true},
{"name": "Bridge", "value": "${BRG}", "inline": true},
{"name": "Storage", "value": "${STORAGE}", "inline": true}
],
"footer": {"text": "community-scripts.org"}
}]
}
JSON

# Both deliveries are best-effort: a down webhook must not fail the hook.
if ! curl -fsS --max-time 10 \
  -H "Content-Type: application/json" \
  -X POST "$DISCORD_WEBHOOK" \
  --data "$discord_body" \
  >/dev/null; then
  echo "WARN: Discord webhook failed (non-fatal)"
fi

# --- Gotify push ------------------------------------------------------------
if ! curl -fsS --max-time 10 \
  -H "X-Gotify-Key: ${GOTIFY_TOKEN}" \
  -F "title=Proxmox: ${APP} LXC created" \
  -F "message=CTID=${CTID} IP=${IP} HN=${HN} on ${NODE}" \
  -F "priority=${GOTIFY_PRIORITY}" \
  "${GOTIFY_URL}/message" \
  >/dev/null; then
  echo "WARN: Gotify push failed (non-fatal)"
fi

echo "Notifications dispatched for CTID=${CTID}"
# ▲▲▲ EXAMPLE 2 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 3 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : auto-pool-tags-backup.sh
# Purpose : Add the new LXC to a Proxmox pool, append cluster-wide tags,
# register a DNS record in pi-hole, and trigger an immediate
# snapshot backup to a configured storage.
# Difficulty : ⭐⭐⭐ Advanced
# Requires : pvesh, pct, vzdump (host-side; available by default on PVE),
# a reachable pi-hole admin API.
# ============================================================================
#!/usr/bin/env bash
set -euo pipefail
# Post-install hook: attach the new CT to a pool, merge tags, publish a DNS
# record to Pi-hole (v6 API), and optionally trigger an initial vzdump backup.
# Uses APP/NSAPP/CTID/IP/HN from the environment exported by build.func.
# --- CONFIG (edit me) -------------------------------------------------------
TARGET_POOL="auto-lxc"
EXTRA_TAGS=("auto-provisioned" "${NSAPP}") # community-script tag is set by build.func
BACKUP_STORAGE="pbs-main" # set to "" to skip initial backup
PIHOLE_HOST="192.168.1.5"
PIHOLE_PASSWORD="changeme" # web-UI password; must not contain '"' (embedded in JSON below)
DNS_DOMAIN="lan" # FQDN will be ${HN}.${DNS_DOMAIN}
# ----------------------------------------------------------------------------
# 1) Ensure the pool exists, then attach the CT
if ! pvesh get "/pools/${TARGET_POOL}" >/dev/null 2>&1; then
  echo "Creating pool: ${TARGET_POOL}"
  pvesh create /pools --poolid "${TARGET_POOL}" --comment "Auto-created by post-install hook" || true
fi
echo "Adding CTID=${CTID} to pool=${TARGET_POOL}"
pvesh set "/pools/${TARGET_POOL}" --vms "${CTID}" || echo "WARN: pool attach failed (non-fatal)"
# 2) Merge new tags with existing ones (preserve community-script etc.)
CURRENT_TAGS="$(pct config "${CTID}" | awk -F': ' '/^tags:/{print $2}')"
declare -A TAG_SET
IFS=';' read -r -a CUR_ARR <<<"${CURRENT_TAGS:-}"
for t in "${CUR_ARR[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done
for t in "${EXTRA_TAGS[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done
# Join the de-duplicated (unordered) tag set back into Proxmox's ';' format.
NEW_TAGS="$(
  IFS=';'
  echo "${!TAG_SET[*]}"
)"
echo "Setting tags: ${NEW_TAGS}"
pct set "${CTID}" --tags "${NEW_TAGS}" || echo "WARN: tag update failed (non-fatal)"
# 3) Register DNS in pi-hole (custom DNS record)
FQDN="${HN}.${DNS_DOMAIN}"
echo "Registering DNS: ${FQDN} -> ${IP} on pi-hole ${PIHOLE_HOST}"
# Pi-hole v6 authentication expects a JSON body {"password": "..."} on
# POST /api/auth and answers with a JSON object containing the session "sid".
SID="$(curl -fsS --max-time 5 \
  -H "Content-Type: application/json" \
  -d "{\"password\":\"${PIHOLE_PASSWORD}\"}" \
  "http://${PIHOLE_HOST}/api/auth" 2>/dev/null |
  sed -nE 's/.*"sid":"([^"]+)".*/\1/p' || true)"
if [[ -n "${SID}" ]]; then
  curl -fsS --max-time 5 -X PUT \
    -H "Content-Type: application/json" \
    -H "sid: ${SID}" \
    -d "{\"hosts\":[\"${IP} ${FQDN}\"]}" \
    "http://${PIHOLE_HOST}/api/config/dns/hosts" >/dev/null ||
    echo "WARN: pi-hole DNS update failed (non-fatal)"
  # Tear down the session again — Pi-hole limits concurrent sessions.
  curl -fsS --max-time 5 -X DELETE -H "sid: ${SID}" "http://${PIHOLE_HOST}/api/auth" >/dev/null || true
else
  echo "WARN: could not obtain pi-hole session (skipping DNS)"
fi
# 4) Initial backup (best-effort, can take a few minutes)
if [[ -n "${BACKUP_STORAGE}" ]]; then
  if pvesh get "/storage/${BACKUP_STORAGE}" >/dev/null 2>&1; then
    echo "Triggering initial backup of CTID=${CTID} to ${BACKUP_STORAGE}"
    vzdump "${CTID}" \
      --storage "${BACKUP_STORAGE}" \
      --mode snapshot \
      --compress zstd \
      --notes-template "Initial backup of ${APP} (CTID=${CTID})" \
      --notification-mode auto ||
      echo "WARN: initial backup failed (non-fatal)"
  else
    echo "Backup storage '${BACKUP_STORAGE}' not found — skipping."
  fi
fi
echo "Post-provision routine complete for ${APP} (CTID=${CTID})"
# ▲▲▲ EXAMPLE 3 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 4 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : inject-ssh-and-monitoring.sh
# Purpose : Push the host's admin SSH key into the new LXC, install the
# Beszel monitoring agent inside the container, and register
# an Uptime-Kuma HTTP push monitor for the LXC's IP.
# Difficulty : ⭐⭐⭐ Advanced
# Requires : pct (host), curl (inside LXC), reachable Beszel hub +
# Uptime-Kuma push URL.
# ============================================================================
#!/usr/bin/env bash
set -euo pipefail
# Hook: push admin SSH key into the new LXC, install the Beszel agent inside
# it, and ping an Uptime-Kuma push monitor. Runs on the PVE host as root.
# --- CONFIG (edit me) -------------------------------------------------------
ADMIN_KEY="/root/.ssh/admin_ed25519.pub"
BESZEL_HUB_URL="http://192.168.1.10:8090"
BESZEL_AGENT_KEY="ssh-ed25519 AAAA... beszel@hub" # public key of the hub
UPTIME_KUMA_PUSH_BASE="http://uptime.lan/api/push/abc123"
# NOTE(review): BESZEL_HUB_URL is declared but never referenced below —
# presumably kept for future use; verify before relying on it.
# ----------------------------------------------------------------------------
# 1) Inject the admin SSH key
# NOTE(review): pct push REPLACES /root/.ssh/authorized_keys in the CT —
# any keys already present are lost. Confirm this is intended.
if [[ -f "${ADMIN_KEY}" ]]; then
echo "Pushing admin SSH key into CTID=${CTID}"
pct exec "${CTID}" -- mkdir -p /root/.ssh
pct exec "${CTID}" -- chmod 700 /root/.ssh
pct push "${CTID}" "${ADMIN_KEY}" /root/.ssh/authorized_keys
pct exec "${CTID}" -- chmod 600 /root/.ssh/authorized_keys
else
echo "WARN: ${ADMIN_KEY} not found on host — skipping SSH key injection"
fi
# 2) Wait for outbound networking inside the CT (max 30 s)
# DNS resolution of deb.debian.org doubles as an "is networking up" probe;
# the loop simply falls through after 30 attempts (no hard failure).
echo "Waiting for network inside CTID=${CTID}"
for _ in $(seq 1 30); do
if pct exec "${CTID}" -- bash -c 'getent hosts deb.debian.org >/dev/null 2>&1'; then
break
fi
sleep 1
done
# 3) Install Beszel agent inside the LXC
# The quoted 'AGENT_INSTALL' delimiter prevents HOST-side expansion; the
# script below runs verbatim inside the container (its inner UNIT heredoc
# is expanded by the container's shell, which leaves __KEY_PLACEHOLDER__
# literal for the sed step afterwards).
echo "Installing Beszel agent inside CTID=${CTID}"
pct exec "${CTID}" -- bash -s <<'AGENT_INSTALL' || echo "WARN: Beszel install failed"
set -euo pipefail
ARCH="$(uname -m)"
case "$ARCH" in
x86_64) ARCH_TAG=amd64 ;;
aarch64) ARCH_TAG=arm64 ;;
*) echo "Unsupported arch: $ARCH"; exit 1 ;;
esac
TMP=$(mktemp -d)
cd "$TMP"
curl -fsSL "https://github.com/henrygd/beszel/releases/latest/download/beszel-agent_linux_${ARCH_TAG}.tar.gz" \
| tar -xz
install -m 0755 beszel-agent /usr/local/bin/beszel-agent
cat >/etc/systemd/system/beszel-agent.service <<UNIT
[Unit]
Description=Beszel Agent
After=network-online.target
Wants=network-online.target
[Service]
Environment="PORT=45876"
Environment="KEY=__KEY_PLACEHOLDER__"
ExecStart=/usr/local/bin/beszel-agent
Restart=always
[Install]
WantedBy=multi-user.target
UNIT
AGENT_INSTALL
# Inject the configured public key into the unit file (avoids quoting hell)
# NOTE(review): the sed uses '|' as delimiter — assumes BESZEL_AGENT_KEY
# never contains a '|' character.
pct exec "${CTID}" -- sed -i "s|__KEY_PLACEHOLDER__|${BESZEL_AGENT_KEY}|" \
/etc/systemd/system/beszel-agent.service
pct exec "${CTID}" -- systemctl daemon-reload
pct exec "${CTID}" -- systemctl enable --now beszel-agent.service ||
echo "WARN: could not start beszel-agent"
# 4) Register an Uptime-Kuma push monitor (host-side, just sends one ping)
echo "Pinging Uptime-Kuma push monitor for ${HN}"
curl -fsS --max-time 5 \
--get \
--data-urlencode "status=up" \
--data-urlencode "msg=created by community-scripts" \
--data-urlencode "ping=1" \
--data-urlencode "label=${HN}" \
"${UPTIME_KUMA_PUSH_BASE}" >/dev/null ||
echo "WARN: Uptime-Kuma push failed (non-fatal)"
echo "Provisioned monitoring for ${APP} (CTID=${CTID}, IP=${IP})"
# ▲▲▲ EXAMPLE 4 — END ▲▲▲
# ============================================================================
# ▼▼▼ EXAMPLE 5 — BEGIN ▼▼▼
# ----------------------------------------------------------------------------
# Name : per-app-router.sh
# Purpose : Single dispatcher hook that runs different actions
# depending on the app being installed (NSAPP). Useful when
# you want ONE hook for the whole cluster but distinct
# behavior for, e.g., databases vs media services.
# Difficulty : ⭐⭐⭐ Advanced
# ============================================================================
#!/usr/bin/env bash
set -euo pipefail
# Dispatcher hook: one script for the whole cluster, with per-app behavior
# selected below via a case on ${NSAPP}.
# --- CONFIG (edit me) -------------------------------------------------------
DEFAULT_DNS_SUFFIX="lan"
PROM_FILE_SD_DIR="/etc/prometheus/file_sd" # on the host that runs Prometheus
# ----------------------------------------------------------------------------
# Timestamped logger used by every branch below.
log() { printf '[%s] %s\n' "$(date +%H:%M:%S)" "$*"; }
# ---------- shared helpers --------------------------------------------------
register_prometheus_target() {
  # Upsert the new CT as a Prometheus file-sd target.
  # $1 = job name (becomes <job>.json), $2 = scrape port on the CT.
  # Reads IP/HN/NSAPP from the environment and PROM_FILE_SD_DIR from config.
  local job_name="$1"
  local scrape_port="$2"
  local sd_file="${PROM_FILE_SD_DIR}/${job_name}.json"
  mkdir -p "${PROM_FILE_SD_DIR}"
  # Seed an empty JSON list so the Python step always has valid input.
  [[ -f "$sd_file" ]] || echo "[]" >"$sd_file"
  python3 - "$sd_file" "${IP}:${scrape_port}" "${HN}" "${NSAPP}" <<'PY'
import json, sys
path, target, hn, app = sys.argv[1:5]
data = json.load(open(path))
# Avoid duplicates
data = [b for b in data if target not in b.get("targets", [])]
data.append({"targets": [target], "labels": {"hostname": hn, "app": app}})
json.dump(data, open(path, "w"), indent=2)
PY
  log "Registered Prometheus target ${IP}:${scrape_port} in ${sd_file}"
}
set_ct_options() {
  # Best-effort resource/description tweak for the freshly created CT.
  # $1 = cores, $2 = memory (MiB), $3 = description text.
  local core_count="$1"
  local mem_mb="$2"
  local note="$3"
  # Never abort the hook if pct refuses the change.
  pct set "${CTID}" --cores "${core_count}" --memory "${mem_mb}" || true
  pct set "${CTID}" --description "${note}" || true
}
# ---------- per-app dispatch ------------------------------------------------
# Route on the app slug (NSAPP); each arm is best-effort so one failing
# integration never blocks the rest of the branch.
log "Dispatching post-install for NSAPP=${NSAPP} CTID=${CTID}"
case "${NSAPP}" in
# ------ Databases ---------------------------------------------------------
postgresql | mariadb | mongodb | redis | valkey)
log "Database role: bumping resources & adding to backup-critical pool"
set_ct_options 4 4096 "DB: ${APP}"
pvesh set /pools/db-critical --vms "${CTID}" 2>/dev/null || true
register_prometheus_target "${NSAPP}-exporter" 9187
;;
# ------ *arr media stack --------------------------------------------------
sonarr | radarr | prowlarr | lidarr | readarr | bazarr)
log "Media-arr role: tagging + Sonarr/Radarr API webhook"
# Overwrites the CT's tag list with the fixed media-stack set.
pct set "${CTID}" --tags "community-script;media;arr-stack" || true
curl -fsS --max-time 5 -X POST \
"http://media-hub.${DEFAULT_DNS_SUFFIX}/hooks/arr-added" \
-H "Content-Type: application/json" \
-d "{\"app\":\"${NSAPP}\",\"ctid\":${CTID},\"ip\":\"${IP}\"}" \
>/dev/null || log "WARN: media-hub webhook failed"
;;
# ------ Web apps that should sit behind NPM/Traefik ----------------------
vaultwarden | paperless-ngx | nextcloud | immich | bookstack)
log "Web app role: registering reverse-proxy entry"
# The JSON body is built in a command substitution so the heredoc can
# expand HN/IP/NSAPP without escaping every quote inline.
curl -fsS --max-time 5 -X POST \
"http://traefik.${DEFAULT_DNS_SUFFIX}/api/dynamic-add" \
-H "Content-Type: application/json" \
-d "$(
cat <<JSON
{
"name": "${HN}",
"host": "${HN}.${DEFAULT_DNS_SUFFIX}",
"backend": "http://${IP}",
"app": "${NSAPP}"
}
JSON
)" >/dev/null || log "WARN: traefik registration failed"
register_prometheus_target "blackbox-http" 80
;;
# ------ Default fallback --------------------------------------------------
*)
log "No special handling for ${NSAPP} — applying generic defaults"
register_prometheus_target "node-exporter" 9100
;;
esac
log "Finished dispatcher for ${APP} (CTID=${CTID})"
# ▲▲▲ EXAMPLE 5 — END ▲▲▲
# ============================================================================
# END OF EXAMPLES
# ============================================================================

View File

@@ -188,7 +188,9 @@ start_routines_4() {
yes)
msg_info "Correcting Debian Sources (deb822)"
rm -f /etc/apt/sources.list.d/*.list
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
if [ -f /etc/apt/sources.list ]; then
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list
fi
cat >/etc/apt/sources.list.d/debian.sources <<EOF
Types: deb
URIs: http://deb.debian.org/debian/

View File

@@ -251,8 +251,10 @@ start_routines_9() {
msg_info "Correcting Proxmox VE Sources (deb822)"
# remove all existing .list files
rm -f /etc/apt/sources.list.d/*.list
# remove bookworm and proxmox entries from sources.list
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
# remove bookworm and proxmox entries from sources.list (if it exists)
if [ -f /etc/apt/sources.list ]; then
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list
fi
# Create new deb822 sources
cat >/etc/apt/sources.list.d/debian.sources <<EOF
Types: deb

View File

@@ -738,7 +738,24 @@ done
msg_info "Creating a OPNsense VM"
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
-name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 4M &>/dev/null
# Retry pvesm alloc on transient zfs_request "got timeout" errors (#14127)
alloc_attempt=1
alloc_max=4
alloc_delay=5
while :; do
  # Discard stdout, capture stderr only — the error text is classified below.
  alloc_err=$(pvesm alloc $STORAGE $VMID $DISK0 4M 2>&1 >/dev/null) && break
  if [[ "$alloc_err" == *"got timeout"* && $alloc_attempt -lt $alloc_max ]]; then
    msg_warn "pvesm alloc hit zfs timeout (attempt $alloc_attempt/$alloc_max), retrying in ${alloc_delay}s..."
    # Best-effort cleanup of a possibly half-created volume before retrying.
    pvesm free "${DISK0_REF}" &>/dev/null || true
    sleep "$alloc_delay"
    alloc_attempt=$((alloc_attempt + 1))
    alloc_delay=$((alloc_delay * 2)) # exponential backoff: 5s, 10s, 20s
    continue
  fi
  # Non-timeout failure or retries exhausted: print the error verbatim.
  # printf instead of `echo -e` so backslashes in the tool output are not
  # reinterpreted as escape sequences.
  printf '%s\n' "$alloc_err" >&2
  exit 220
done
qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} &>/dev/null
qm set $VMID \
-efidisk0 ${DISK0_REF}${FORMAT} \